author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 09:22:35 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 16:09:40 +0200
commit     8b25d1ad5d2264bdfc2818c7bda74ee2697df6db (patch)
tree       8c3c769777f7e66a2d1ba7dd7651b563cfde370b
parent     97f17497d162afdb82c8704bf097f0fee3724b2e (diff)
Imported Upstream version 16.07-rc1
Change-Id: I40a523e52f12e8496fdd69e902824b0226c303de
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-rw-r--r--MAINTAINERS37
-rw-r--r--app/Makefile1
-rw-r--r--app/pdump/Makefile49
-rw-r--r--app/pdump/main.c844
-rw-r--r--app/proc_info/main.c28
-rw-r--r--app/test-acl/main.c2
-rw-r--r--app/test-pmd/Makefile5
-rw-r--r--app/test-pmd/cmdline.c260
-rw-r--r--app/test-pmd/config.c166
-rw-r--r--app/test-pmd/csumonly.c15
-rw-r--r--app/test-pmd/flowgen.c25
-rw-r--r--app/test-pmd/icmpecho.c18
-rw-r--r--app/test-pmd/iofwd.c22
-rw-r--r--app/test-pmd/macfwd-retry.c164
-rw-r--r--app/test-pmd/macfwd.c16
-rw-r--r--app/test-pmd/macswap.c15
-rw-r--r--app/test-pmd/mempool_anon.c201
-rw-r--r--app/test-pmd/parameters.c3
-rw-r--r--app/test-pmd/rxonly.c7
-rw-r--r--app/test-pmd/testpmd.c173
-rw-r--r--app/test-pmd/testpmd.h18
-rw-r--r--app/test-pmd/txonly.c26
-rw-r--r--app/test/Makefile50
-rw-r--r--app/test/autotest_data.py51
-rw-r--r--app/test/autotest_test_funcs.py30
-rw-r--r--app/test/commands.c6
-rw-r--r--app/test/resource.c305
-rw-r--r--app/test/resource.h135
-rw-r--r--app/test/test.h64
-rw-r--r--app/test/test_alarm.c48
-rw-r--r--app/test/test_cmdline_string.c15
-rw-r--r--app/test/test_cryptodev.c2451
-rw-r--r--app/test/test_cryptodev.h5
-rw-r--r--app/test/test_cryptodev_aes.c683
-rw-r--r--app/test/test_cryptodev_aes.h1124
-rw-r--r--app/test/test_cryptodev_kasumi_hash_test_vectors.h260
-rw-r--r--app/test/test_cryptodev_kasumi_test_vectors.h308
-rw-r--r--app/test/test_cryptodev_perf.c1156
-rw-r--r--app/test/test_cryptodev_snow3g_hash_test_vectors.h107
-rw-r--r--app/test/test_hash.c12
-rw-r--r--app/test/test_hash_multiwriter.c287
-rw-r--r--app/test/test_interrupts.c4
-rw-r--r--app/test/test_link_bonding.c2
-rw-r--r--app/test/test_link_bonding_mode4.c215
-rw-r--r--app/test/test_logs.c28
-rw-r--r--app/test/test_lpm.c211
-rw-r--r--app/test/test_lpm6.c192
-rw-r--r--app/test/test_lpm6_perf.c191
-rw-r--r--app/test/test_lpm_perf.c249
-rw-r--r--app/test/test_mbuf.c10
-rw-r--r--app/test/test_memcpy.c15
-rw-r--r--app/test/test_mempool.c346
-rw-r--r--app/test/test_mempool_perf.c82
-rw-r--r--app/test/test_mp_secondary.c4
-rw-r--r--app/test/test_pci.c164
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/class1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/configbin0 -> 64 bytes
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/consistent_dma_mask_bits1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/dma_mask_bits1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/enable1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/irq1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/modalias1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/msi_bus1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/numa_node1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/resource13
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_numvfs1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_totalvfs1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/uevent6
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/class1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/resource13
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/class1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/resource13
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/vendor1
-rw-r--r--app/test/test_per_lcore.c4
-rw-r--r--app/test/test_pmd_perf.c3
-rw-r--r--app/test/test_red.c89
-rw-r--r--app/test/test_resource.c137
-rw-r--r--app/test/test_ring.c8
-rw-r--r--app/test/test_spinlock.c6
-rw-r--r--app/test/test_timer.c20
-rw-r--r--config/common_base52
-rw-r--r--config/common_linuxapp1
-rw-r--r--config/defconfig_arm-armv7a-linuxapp-gcc1
-rw-r--r--config/defconfig_arm64-armv8a-linuxapp-gcc3
-rw-r--r--config/defconfig_arm64-dpaa2-linuxapp-gcc42
-rw-r--r--config/defconfig_arm64-thunderx-linuxapp-gcc12
-rw-r--r--config/defconfig_i686-native-linuxapp-gcc5
-rw-r--r--config/defconfig_i686-native-linuxapp-icc5
-rw-r--r--config/defconfig_ppc_64-power8-linuxapp-gcc2
-rw-r--r--doc/api/doxy-api-index.md1
-rw-r--r--doc/guides/contributing/coding_style.rst8
-rw-r--r--doc/guides/contributing/versioning.rst2
-rw-r--r--doc/guides/cryptodevs/aesni_mb.rst5
-rw-r--r--doc/guides/cryptodevs/index.rst3
-rw-r--r--doc/guides/cryptodevs/kasumi.rst101
-rw-r--r--doc/guides/cryptodevs/overview.rst79
-rw-r--r--doc/guides/cryptodevs/qat.rst3
-rw-r--r--doc/guides/cryptodevs/snow3g.rst52
-rw-r--r--doc/guides/faq/faq.rst7
-rw-r--r--doc/guides/freebsd_gsg/build_dpdk.rst2
-rw-r--r--doc/guides/linux_gsg/enable_func.rst15
-rw-r--r--doc/guides/linux_gsg/sys_reqs.rst3
-rw-r--r--doc/guides/nics/bnxt.rst49
-rw-r--r--doc/guides/nics/fm10k.rst5
-rw-r--r--doc/guides/nics/i40e.rst45
-rw-r--r--doc/guides/nics/index.rst3
-rw-r--r--doc/guides/nics/mlx5.rst96
-rw-r--r--doc/guides/nics/nfp.rst47
-rw-r--r--doc/guides/nics/overview.rst153
-rw-r--r--doc/guides/nics/qede.rst314
-rw-r--r--doc/guides/nics/thunderx.rst354
-rw-r--r--doc/guides/prog_guide/env_abstraction_layer.rst4
-rw-r--r--doc/guides/prog_guide/index.rst1
-rw-r--r--doc/guides/prog_guide/ivshmem_lib.rst2
-rw-r--r--doc/guides/prog_guide/mempool_lib.rst42
-rw-r--r--doc/guides/prog_guide/pdump_lib.rst123
-rw-r--r--doc/guides/prog_guide/poll_mode_drv.rst21
-rw-r--r--doc/guides/prog_guide/vhost_lib.rst223
-rw-r--r--doc/guides/rel_notes/deprecation.rst79
-rw-r--r--doc/guides/rel_notes/index.rst1
-rw-r--r--doc/guides/rel_notes/known_issues.rst40
-rw-r--r--doc/guides/rel_notes/release_16_07.rst358
-rw-r--r--doc/guides/sample_app_ug/img/ipsec_endpoints.svg850
-rw-r--r--doc/guides/sample_app_ug/index.rst1
-rw-r--r--doc/guides/sample_app_ug/ip_pipeline.rst112
-rw-r--r--doc/guides/sample_app_ug/ipsec_secgw.rst910
-rw-r--r--doc/guides/sample_app_ug/ipv4_multicast.rst4
-rw-r--r--doc/guides/sample_app_ug/l2_forward_crypto.rst4
-rw-r--r--doc/guides/sample_app_ug/l2_forward_job_stats.rst3
-rw-r--r--doc/guides/sample_app_ug/l2_forward_real_virtual.rst3
-rw-r--r--doc/guides/sample_app_ug/l3_forward.rst2
-rw-r--r--doc/guides/sample_app_ug/link_status_intr.rst3
-rw-r--r--doc/guides/sample_app_ug/pdump.rst122
-rw-r--r--doc/guides/sample_app_ug/vhost.rst53
-rw-r--r--doc/guides/testpmd_app_ug/run_app.rst5
-rw-r--r--doc/guides/testpmd_app_ug/testpmd_funcs.rst68
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/aesni_gcm/Makefile7
-rw-r--r--drivers/crypto/aesni_mb/Makefile5
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c9
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c20
-rw-r--r--drivers/crypto/kasumi/Makefile69
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd.c657
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd_ops.c344
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd_private.h106
-rw-r--r--drivers/crypto/kasumi/rte_pmd_kasumi_version.map3
-rw-r--r--drivers/crypto/null/Makefile2
-rw-r--r--drivers/crypto/qat/Makefile2
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs_build_desc.c7
-rw-r--r--drivers/crypto/qat/qat_crypto.c45
-rw-r--r--drivers/crypto/qat/rte_qat_cryptodev.c5
-rw-r--r--drivers/crypto/snow3g/Makefile17
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd.c215
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/af_packet/Makefile2
-rw-r--r--drivers/net/af_packet/rte_eth_af_packet.c20
-rw-r--r--drivers/net/bnx2x/Makefile4
-rw-r--r--drivers/net/bnx2x/bnx2x.c222
-rw-r--r--drivers/net/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.c16
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.c66
-rw-r--r--drivers/net/bnx2x/elink.c8
-rw-r--r--drivers/net/bnxt/Makefile74
-rw-r--r--drivers/net/bnxt/bnxt.h176
-rw-r--r--drivers/net/bnxt/bnxt_cpr.c159
-rw-r--r--drivers/net/bnxt/bnxt_cpr.h86
-rw-r--r--drivers/net/bnxt/bnxt_ethdev.c1049
-rw-r--r--drivers/net/bnxt/bnxt_filter.c175
-rw-r--r--drivers/net/bnxt/bnxt_filter.h74
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.c1491
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.h104
-rw-r--r--drivers/net/bnxt/bnxt_ring.c306
-rw-r--r--drivers/net/bnxt/bnxt_ring.h103
-rw-r--r--drivers/net/bnxt/bnxt_rxq.c319
-rw-r--r--drivers/net/bnxt/bnxt_rxq.h74
-rw-r--r--drivers/net/bnxt/bnxt_rxr.c367
-rw-r--r--drivers/net/bnxt/bnxt_rxr.h (renamed from app/test-pmd/mempool_osdep.h)48
-rw-r--r--drivers/net/bnxt/bnxt_stats.c142
-rw-r--r--drivers/net/bnxt/bnxt_stats.h (renamed from lib/librte_vhost/virtio-net.h)17
-rw-r--r--drivers/net/bnxt/bnxt_txq.c162
-rw-r--r--drivers/net/bnxt/bnxt_txq.h75
-rw-r--r--drivers/net/bnxt/bnxt_txr.c339
-rw-r--r--drivers/net/bnxt/bnxt_txr.h71
-rw-r--r--drivers/net/bnxt/bnxt_vnic.c277
-rw-r--r--drivers/net/bnxt/bnxt_vnic.h80
-rw-r--r--drivers/net/bnxt/hsi_struct_def_dpdk.h5799
-rw-r--r--drivers/net/bnxt/rte_pmd_bnxt_version.map4
-rw-r--r--drivers/net/bonding/Makefile2
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.c336
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.h83
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad_private.h14
-rw-r--r--drivers/net/bonding/rte_eth_bond_alb.c8
-rw-r--r--drivers/net/bonding/rte_eth_bond_api.c30
-rw-r--r--drivers/net/bonding/rte_eth_bond_pmd.c11
-rw-r--r--drivers/net/bonding/rte_eth_bond_private.h3
-rw-r--r--drivers/net/bonding/rte_eth_bond_version.map16
-rw-r--r--drivers/net/cxgbe/Makefile8
-rw-r--r--drivers/net/cxgbe/base/adapter.h138
-rw-r--r--drivers/net/cxgbe/base/common.h11
-rw-r--r--drivers/net/cxgbe/base/t4_hw.c1047
-rw-r--r--drivers/net/cxgbe/base/t4_hw.h5
-rw-r--r--drivers/net/cxgbe/cxgbe_ethdev.c168
-rw-r--r--drivers/net/e1000/Makefile6
-rw-r--r--drivers/net/e1000/base/e1000_phy.c6
-rw-r--r--drivers/net/e1000/em_rxtx.c19
-rw-r--r--drivers/net/e1000/igb_ethdev.c290
-rw-r--r--drivers/net/e1000/igb_rxtx.c19
-rw-r--r--drivers/net/ena/Makefile2
-rw-r--r--drivers/net/ena/base/ena_plat_dpdk.h10
-rw-r--r--drivers/net/ena/ena_ethdev.c2
-rw-r--r--drivers/net/ena/ena_ethdev.h1
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/base/enic_vnic_wq.h79
-rw-r--r--drivers/net/enic/base/rq_enet_desc.h2
-rw-r--r--drivers/net/enic/base/vnic_cq.h44
-rw-r--r--drivers/net/enic/base/vnic_dev.c14
-rw-r--r--drivers/net/enic/base/vnic_dev.h2
-rw-r--r--drivers/net/enic/base/vnic_enet.h17
-rw-r--r--drivers/net/enic/base/vnic_rq.c8
-rw-r--r--drivers/net/enic/base/vnic_rq.h18
-rw-r--r--drivers/net/enic/base/vnic_wq.c80
-rw-r--r--drivers/net/enic/base/vnic_wq.h118
-rw-r--r--drivers/net/enic/enic.h94
-rw-r--r--drivers/net/enic/enic_clsf.c16
-rw-r--r--drivers/net/enic/enic_ethdev.c90
-rw-r--r--drivers/net/enic/enic_main.c592
-rw-r--r--drivers/net/enic/enic_res.c30
-rw-r--r--drivers/net/enic/enic_res.h84
-rw-r--r--drivers/net/enic/enic_rx.c359
-rw-r--r--drivers/net/enic/enic_rxtx.c547
-rw-r--r--drivers/net/fm10k/Makefile6
-rw-r--r--drivers/net/fm10k/fm10k_ethdev.c59
-rw-r--r--drivers/net/fm10k/fm10k_rxtx.c16
-rw-r--r--drivers/net/fm10k/fm10k_rxtx_vec.c13
-rw-r--r--drivers/net/i40e/Makefile13
-rw-r--r--drivers/net/i40e/base/i40e_adminq.c91
-rw-r--r--drivers/net/i40e/base/i40e_adminq.h5
-rw-r--r--drivers/net/i40e/base/i40e_adminq_cmd.h54
-rw-r--r--drivers/net/i40e/base/i40e_common.c133
-rw-r--r--drivers/net/i40e/base/i40e_devids.h4
-rw-r--r--drivers/net/i40e/base/i40e_nvm.c86
-rw-r--r--drivers/net/i40e/base/i40e_osdep.h7
-rw-r--r--drivers/net/i40e/base/i40e_prototype.h16
-rw-r--r--drivers/net/i40e/base/i40e_type.h42
-rw-r--r--drivers/net/i40e/base/i40e_virtchnl.h45
-rw-r--r--drivers/net/i40e/i40e_ethdev.c492
-rw-r--r--drivers/net/i40e/i40e_ethdev.h12
-rw-r--r--drivers/net/i40e/i40e_ethdev_vf.c58
-rw-r--r--drivers/net/i40e/i40e_fdir.c27
-rw-r--r--drivers/net/i40e/i40e_pf.c25
-rw-r--r--drivers/net/i40e/i40e_rxtx.c82
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec.c84
-rw-r--r--drivers/net/ixgbe/Makefile10
-rw-r--r--drivers/net/ixgbe/base/README2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82598.c5
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82598.h3
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82599.c9
-rw-r--r--drivers/net/ixgbe/base/ixgbe_api.c41
-rw-r--r--drivers/net/ixgbe/base/ixgbe_api.h8
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.c361
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.h9
-rw-r--r--drivers/net/ixgbe/base/ixgbe_mbx.h4
-rw-r--r--drivers/net/ixgbe/base/ixgbe_osdep.h1
-rw-r--r--drivers/net/ixgbe/base/ixgbe_phy.c16
-rw-r--r--drivers/net/ixgbe/base/ixgbe_phy.h3
-rw-r--r--drivers/net/ixgbe/base/ixgbe_type.h118
-rw-r--r--drivers/net/ixgbe/base/ixgbe_vf.c10
-rw-r--r--drivers/net/ixgbe/base/ixgbe_vf.h7
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x540.c29
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x540.h1
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.c1158
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.h52
-rw-r--r--drivers/net/ixgbe/ixgbe_82599_bypass.c8
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_bypass_defines.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.c527
-rw-r--r--drivers/net/ixgbe/ixgbe_fdir.c68
-rw-r--r--drivers/net/ixgbe/ixgbe_pf.c38
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.c499
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_common.h326
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c560
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c (renamed from drivers/net/ixgbe/ixgbe_rxtx_vec.c)314
-rw-r--r--drivers/net/mlx4/Makefile15
-rw-r--r--drivers/net/mlx4/mlx4.c218
-rw-r--r--drivers/net/mlx5/Makefile65
-rw-r--r--drivers/net/mlx5/mlx5.c200
-rw-r--r--drivers/net/mlx5/mlx5.h14
-rw-r--r--drivers/net/mlx5/mlx5_defs.h26
-rw-r--r--drivers/net/mlx5/mlx5_ethdev.c249
-rw-r--r--drivers/net/mlx5/mlx5_fdir.c20
-rw-r--r--drivers/net/mlx5/mlx5_mac.c1
-rw-r--r--drivers/net/mlx5/mlx5_mr.c283
-rw-r--r--drivers/net/mlx5/mlx5_prm.h163
-rw-r--r--drivers/net/mlx5/mlx5_rxmode.c16
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c786
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c2200
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h188
-rw-r--r--drivers/net/mlx5/mlx5_txq.c370
-rw-r--r--drivers/net/mlx5/mlx5_vlan.c8
-rw-r--r--drivers/net/mpipe/Makefile2
-rw-r--r--drivers/net/mpipe/mpipe_tilegx.c4
-rw-r--r--drivers/net/nfp/Makefile2
-rw-r--r--drivers/net/nfp/nfp_net.c64
-rw-r--r--drivers/net/nfp/nfp_net_pmd.h1
-rw-r--r--drivers/net/null/Makefile2
-rw-r--r--drivers/net/null/rte_eth_null.c4
-rw-r--r--drivers/net/pcap/Makefile2
-rw-r--r--drivers/net/pcap/rte_eth_pcap.c4
-rw-r--r--drivers/net/qede/LICENSE.qede_pmd28
-rw-r--r--drivers/net/qede/Makefile106
-rw-r--r--drivers/net/qede/base/bcm_osal.c202
-rw-r--r--drivers/net/qede/base/bcm_osal.h403
-rw-r--r--drivers/net/qede/base/common_hsi.h714
-rw-r--r--drivers/net/qede/base/ecore.h754
-rw-r--r--drivers/net/qede/base/ecore_attn_values.h13287
-rw-r--r--drivers/net/qede/base/ecore_chain.h724
-rw-r--r--drivers/net/qede/base/ecore_cxt.c1961
-rw-r--r--drivers/net/qede/base/ecore_cxt.h157
-rw-r--r--drivers/net/qede/base/ecore_cxt_api.h79
-rw-r--r--drivers/net/qede/base/ecore_dcbx.c887
-rw-r--r--drivers/net/qede/base/ecore_dcbx.h55
-rw-r--r--drivers/net/qede/base/ecore_dcbx_api.h160
-rw-r--r--drivers/net/qede/base/ecore_dev.c3597
-rw-r--r--drivers/net/qede/base/ecore_dev_api.h497
-rw-r--r--drivers/net/qede/base/ecore_gtt_reg_addr.h42
-rw-r--r--drivers/net/qede/base/ecore_gtt_values.h33
-rw-r--r--drivers/net/qede/base/ecore_hsi_common.h1912
-rw-r--r--drivers/net/qede/base/ecore_hsi_eth.h1912
-rw-r--r--drivers/net/qede/base/ecore_hsi_tools.h1081
-rw-r--r--drivers/net/qede/base/ecore_hw.c910
-rw-r--r--drivers/net/qede/base/ecore_hw.h269
-rw-r--r--drivers/net/qede/base/ecore_hw_defs.h49
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.c1275
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.h263
-rw-r--r--drivers/net/qede/base/ecore_init_ops.c599
-rw-r--r--drivers/net/qede/base/ecore_init_ops.h103
-rw-r--r--drivers/net/qede/base/ecore_int.c2225
-rw-r--r--drivers/net/qede/base/ecore_int.h234
-rw-r--r--drivers/net/qede/base/ecore_int_api.h277
-rw-r--r--drivers/net/qede/base/ecore_iov_api.h933
-rw-r--r--drivers/net/qede/base/ecore_iro.h115
-rw-r--r--drivers/net/qede/base/ecore_iro_values.h59
-rw-r--r--drivers/net/qede/base/ecore_l2.c1807
-rw-r--r--drivers/net/qede/base/ecore_l2.h151
-rw-r--r--drivers/net/qede/base/ecore_l2_api.h401
-rw-r--r--drivers/net/qede/base/ecore_mcp.c1932
-rw-r--r--drivers/net/qede/base/ecore_mcp.h304
-rw-r--r--drivers/net/qede/base/ecore_mcp_api.h611
-rw-r--r--drivers/net/qede/base/ecore_proto_if.h28
-rw-r--r--drivers/net/qede/base/ecore_rt_defs.h446
-rw-r--r--drivers/net/qede/base/ecore_sp_api.h42
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.c525
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.h137
-rw-r--r--drivers/net/qede/base/ecore_spq.c943
-rw-r--r--drivers/net/qede/base/ecore_spq.h284
-rw-r--r--drivers/net/qede/base/ecore_sriov.c3422
-rw-r--r--drivers/net/qede/base/ecore_sriov.h390
-rw-r--r--drivers/net/qede/base/ecore_status.h30
-rw-r--r--drivers/net/qede/base/ecore_utils.h31
-rw-r--r--drivers/net/qede/base/ecore_vf.c1332
-rw-r--r--drivers/net/qede/base/ecore_vf.h415
-rw-r--r--drivers/net/qede/base/ecore_vf_api.h200
-rw-r--r--drivers/net/qede/base/ecore_vfpf_if.h590
-rw-r--r--drivers/net/qede/base/eth_common.h526
-rw-r--r--drivers/net/qede/base/mcp_public.h1205
-rw-r--r--drivers/net/qede/base/nvm_cfg.h919
-rw-r--r--drivers/net/qede/base/reg_addr.h1107
-rw-r--r--drivers/net/qede/qede_eth_if.c463
-rw-r--r--drivers/net/qede/qede_eth_if.h177
-rw-r--r--drivers/net/qede/qede_ethdev.c1344
-rw-r--r--drivers/net/qede/qede_ethdev.h162
-rw-r--r--drivers/net/qede/qede_if.h164
-rw-r--r--drivers/net/qede/qede_logs.h90
-rw-r--r--drivers/net/qede/qede_main.c664
-rw-r--r--drivers/net/qede/qede_rxtx.c1389
-rw-r--r--drivers/net/qede/qede_rxtx.h179
-rw-r--r--drivers/net/qede/rte_pmd_qede_version.map4
-rw-r--r--drivers/net/szedata2/Makefile3
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.c2
-rw-r--r--drivers/net/thunderx/Makefile70
-rw-r--r--drivers/net/thunderx/base/nicvf_hw.c905
-rw-r--r--drivers/net/thunderx/base/nicvf_hw.h240
-rw-r--r--drivers/net/thunderx/base/nicvf_hw_defs.h1219
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.c418
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.h232
-rw-r--r--drivers/net/thunderx/base/nicvf_plat.h132
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.c1791
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.h106
-rw-r--r--drivers/net/thunderx/nicvf_logs.h83
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.c599
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.h101
-rw-r--r--drivers/net/thunderx/nicvf_struct.h124
-rw-r--r--drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map4
-rw-r--r--drivers/net/vhost/Makefile4
-rw-r--r--drivers/net/vhost/rte_eth_vhost.c119
-rw-r--r--drivers/net/virtio/Makefile7
-rw-r--r--drivers/net/virtio/virtio_ethdev.c515
-rw-r--r--drivers/net/virtio/virtio_ethdev.h4
-rw-r--r--drivers/net/virtio/virtio_logs.h6
-rw-r--r--drivers/net/virtio/virtio_pci.c120
-rw-r--r--drivers/net/virtio/virtio_pci.h9
-rw-r--r--drivers/net/virtio/virtio_ring.h2
-rw-r--r--drivers/net/virtio/virtio_rxtx.c361
-rw-r--r--drivers/net/virtio/virtio_rxtx.h56
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple.c93
-rw-r--r--drivers/net/virtio/virtio_user/vhost.h146
-rw-r--r--drivers/net/virtio/virtio_user/vhost_user.c426
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.c333
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.h62
-rw-r--r--drivers/net/virtio/virtio_user_ethdev.c440
-rw-r--r--drivers/net/virtio/virtqueue.h80
-rw-r--r--drivers/net/vmxnet3/Makefile4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.h8
-rw-r--r--drivers/net/vmxnet3/vmxnet3_rxtx.c49
-rw-r--r--drivers/net/xenvirt/rte_eth_xenvirt.c19
-rw-r--r--drivers/net/xenvirt/rte_eth_xenvirt.h2
-rw-r--r--drivers/net/xenvirt/rte_mempool_gntalloc.c6
-rw-r--r--drivers/net/xenvirt/rte_xen_lib.c10
-rw-r--r--examples/Makefile3
-rw-r--r--examples/distributor/main.c36
-rw-r--r--examples/dpdk_qat/main.c2
-rw-r--r--examples/ethtool/ethtool-app/ethapp.c1
-rw-r--r--examples/ethtool/lib/Makefile4
-rw-r--r--examples/ethtool/lib/rte_ethtool.c12
-rw-r--r--examples/exception_path/main.c3
-rw-r--r--examples/ip_fragmentation/main.c6
-rw-r--r--examples/ip_pipeline/Makefile1
-rw-r--r--examples/ip_pipeline/app.h403
-rw-r--r--examples/ip_pipeline/config/action.cfg68
-rw-r--r--examples/ip_pipeline/config/action.sh119
-rw-r--r--examples/ip_pipeline/config/action.txt8
-rw-r--r--examples/ip_pipeline/config/edge_router_downstream.cfg30
-rw-r--r--examples/ip_pipeline/config/edge_router_downstream.sh7
-rw-r--r--examples/ip_pipeline/config/edge_router_upstream.cfg36
-rw-r--r--examples/ip_pipeline/config/edge_router_upstream.sh37
-rw-r--r--examples/ip_pipeline/config/firewall.cfg68
-rw-r--r--examples/ip_pipeline/config/firewall.sh13
-rw-r--r--examples/ip_pipeline/config/firewall.txt9
-rw-r--r--examples/ip_pipeline/config/flow.cfg72
-rw-r--r--examples/ip_pipeline/config/flow.sh25
-rw-r--r--examples/ip_pipeline/config/flow.txt17
-rw-r--r--examples/ip_pipeline/config/kni.cfg67
-rw-r--r--examples/ip_pipeline/config/l2fwd.cfg5
-rw-r--r--examples/ip_pipeline/config/l3fwd.cfg9
-rw-r--r--examples/ip_pipeline/config/l3fwd.sh32
-rw-r--r--examples/ip_pipeline/config/l3fwd_arp.cfg70
-rw-r--r--examples/ip_pipeline/config/l3fwd_arp.sh43
-rw-r--r--examples/ip_pipeline/config/network_layers.cfg223
-rw-r--r--examples/ip_pipeline/config/network_layers.sh79
-rwxr-xr-xexamples/ip_pipeline/config/pipeline-to-core-mapping.py936
-rw-r--r--examples/ip_pipeline/config_check.c58
-rw-r--r--examples/ip_pipeline/config_parse.c1448
-rw-r--r--examples/ip_pipeline/init.c302
-rw-r--r--examples/ip_pipeline/parser.c745
-rw-r--r--examples/ip_pipeline/parser.h54
-rw-r--r--examples/ip_pipeline/pipeline.h11
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_common_fe.c640
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_common_fe.h26
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_firewall.c1463
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_firewall.h12
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_firewall_be.c28
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_actions.c1507
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_actions.h11
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c22
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification.c2084
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification.h28
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c22
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_master.c2
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_master_be.c22
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_passthrough.c27
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_passthrough_be.c39
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing.c1845
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing.h7
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing_be.c102
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing_be.h16
-rw-r--r--examples/ip_pipeline/pipeline_be.h51
-rw-r--r--examples/ip_pipeline/thread_fe.c39
-rw-r--r--examples/ip_reassembly/main.c6
-rw-r--r--examples/ipsec-secgw/Makefile8
-rw-r--r--examples/ipsec-secgw/esp.c248
-rw-r--r--examples/ipsec-secgw/esp.h9
-rw-r--r--examples/ipsec-secgw/ipip.h149
-rw-r--r--examples/ipsec-secgw/ipsec-secgw.c338
-rw-r--r--examples/ipsec-secgw/ipsec.c60
-rw-r--r--examples/ipsec-secgw/ipsec.h75
-rw-r--r--examples/ipsec-secgw/rt.c229
-rw-r--r--examples/ipsec-secgw/sa.c466
-rw-r--r--examples/ipsec-secgw/sp4.c (renamed from examples/ipsec-secgw/sp.c)171
-rw-r--r--examples/ipsec-secgw/sp6.c448
-rw-r--r--examples/ipv4_multicast/main.c4
-rw-r--r--examples/kni/main.c5
-rw-r--r--examples/l2fwd-crypto/main.c25
-rw-r--r--examples/l2fwd-ivshmem/host/host.c3
-rw-r--r--examples/l2fwd-jobstats/main.c5
-rw-r--r--examples/l2fwd-keepalive/Makefile5
-rw-r--r--examples/l2fwd-keepalive/ka-agent/Makefile49
-rw-r--r--examples/l2fwd-keepalive/ka-agent/main.c150
-rw-r--r--examples/l2fwd-keepalive/main.c25
-rw-r--r--examples/l2fwd-keepalive/shm.c131
-rw-r--r--examples/l2fwd-keepalive/shm.h89
-rw-r--r--examples/l2fwd/main.c26
-rw-r--r--examples/l3fwd-acl/main.c5
-rw-r--r--examples/l3fwd-power/main.c3
-rw-r--r--examples/l3fwd-vf/main.c2
-rw-r--r--examples/l3fwd/l3fwd_em.c2
-rw-r--r--examples/l3fwd/l3fwd_em_hlm_sse.h4
-rw-r--r--examples/l3fwd/main.c2
-rw-r--r--examples/link_status_interrupt/main.c3
-rw-r--r--examples/multi_process/l2fwd_fork/main.c6
-rw-r--r--examples/netmap_compat/lib/compat_netmap.c3
-rw-r--r--examples/packet_ordering/main.c18
-rw-r--r--examples/performance-thread/common/lthread.c2
-rw-r--r--examples/performance-thread/common/lthread_int.h12
-rw-r--r--examples/performance-thread/common/lthread_mutex.c3
-rw-r--r--examples/performance-thread/common/lthread_pool.h4
-rw-r--r--examples/performance-thread/common/lthread_queue.h2
-rw-r--r--examples/performance-thread/common/lthread_sched.c2
-rw-r--r--examples/performance-thread/common/lthread_tls.c4
-rw-r--r--examples/performance-thread/l3fwd-thread/main.c55
-rw-r--r--examples/qos_meter/main.c16
-rw-r--r--examples/qos_meter/main.h2
-rw-r--r--examples/qos_sched/args.c14
-rw-r--r--examples/qos_sched/main.c2
-rw-r--r--examples/qos_sched/main.h5
-rw-r--r--examples/quota_watermark/qw/init.c2
-rw-r--r--examples/quota_watermark/qwctl/qwctl.c2
-rw-r--r--examples/tep_termination/main.c112
-rw-r--r--examples/tep_termination/main.h13
-rw-r--r--examples/tep_termination/vxlan_setup.c20
-rw-r--r--examples/tep_termination/vxlan_setup.h6
-rw-r--r--examples/vhost/main.c2370
-rw-r--r--examples/vhost/main.h70
-rw-r--r--examples/vhost_xen/main.c66
-rw-r--r--examples/vhost_xen/main.h11
-rw-r--r--examples/vm_power_manager/channel_manager.c1
-rw-r--r--examples/vmdq/main.c2
-rw-r--r--examples/vmdq_dcb/main.c2
-rw-r--r--lib/Makefile1
-rw-r--r--lib/librte_acl/Makefile2
-rw-r--r--lib/librte_cfgfile/rte_cfgfile.c1
-rw-r--r--lib/librte_cfgfile/rte_cfgfile.h2
-rw-r--r--lib/librte_cmdline/cmdline.c10
-rw-r--r--lib/librte_cmdline/cmdline_parse.c8
-rw-r--r--lib/librte_cmdline/cmdline_parse.h3
-rw-r--r--lib/librte_cmdline/cmdline_parse_string.c43
-rw-r--r--lib/librte_cmdline/cmdline_parse_string.h15
-rw-r--r--lib/librte_cmdline/rte_cmdline_version.map1
-rw-r--r--lib/librte_cryptodev/rte_crypto_sym.h6
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.c93
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.h98
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_version.map7
-rw-r--r--lib/librte_eal/bsdapp/eal/Makefile2
-rw-r--r--lib/librte_eal/bsdapp/eal/eal.c2
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_debug.c6
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_pci.c9
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_thread.c7
-rw-r--r--lib/librte_eal/bsdapp/eal/rte_eal_version.map10
-rw-r--r--lib/librte_eal/common/eal_common_devargs.c2
-rw-r--r--lib/librte_eal/common/eal_common_lcore.c2
-rw-r--r--lib/librte_eal/common/eal_common_log.c178
-rw-r--r--lib/librte_eal/common/eal_common_memzone.c30
-rw-r--r--lib/librte_eal/common/eal_common_options.c4
-rw-r--r--lib/librte_eal/common/eal_common_pci.c26
-rw-r--r--lib/librte_eal/common/eal_common_pci_uio.c15
-rw-r--r--lib/librte_eal/common/eal_private.h3
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h6
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h6
-rw-r--r--lib/librte_eal/common/include/arch/tile/rte_memcpy.h6
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_memcpy.h116
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_rtm.h9
-rw-r--r--lib/librte_eal/common/include/generic/rte_memcpy.h7
-rw-r--r--lib/librte_eal/common/include/rte_debug.h10
-rw-r--r--lib/librte_eal/common/include/rte_keepalive.h63
-rw-r--r--lib/librte_eal/common/include/rte_lcore.h23
-rw-r--r--lib/librte_eal/common/include/rte_log.h8
-rw-r--r--lib/librte_eal/common/include/rte_memory.h11
-rw-r--r--lib/librte_eal/common/include/rte_pci.h35
-rw-r--r--lib/librte_eal/common/include/rte_pci_dev_ids.h60
-rw-r--r--lib/librte_eal/common/include/rte_version.h6
-rw-r--r--lib/librte_eal/common/rte_keepalive.c64
-rw-r--r--lib/librte_eal/linuxapp/eal/Makefile3
-rw-r--r--lib/librte_eal/linuxapp/eal/eal.c28
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_debug.c6
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_interrupts.c4
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_ivshmem.c11
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_log.c9
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_memory.c154
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci.c84
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci_init.h10
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci_uio.c129
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci_vfio.c2
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c7
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_thread.c13
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_timer.c4
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_xen_memory.c18
-rw-r--r--lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h4
-rw-r--r--lib/librte_eal/linuxapp/eal/rte_eal_version.map10
-rw-r--r--lib/librte_eal/linuxapp/igb_uio/compat.h9
-rw-r--r--lib/librte_eal/linuxapp/igb_uio/igb_uio.c52
-rw-r--r--lib/librte_eal/linuxapp/kni/Makefile2
-rw-r--r--lib/librte_eal/linuxapp/kni/compat.h21
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c6
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c3
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c4
-rw-r--r--lib/librte_eal/linuxapp/kni/kni_misc.c40
-rw-r--r--lib/librte_eal/linuxapp/kni/kni_net.c109
-rw-r--r--lib/librte_ether/Makefile2
-rw-r--r--lib/librte_ether/rte_eth_ctrl.h7
-rw-r--r--lib/librte_ether/rte_ethdev.c265
-rw-r--r--lib/librte_ether/rte_ethdev.h199
-rw-r--r--lib/librte_ether/rte_ether_version.map10
-rw-r--r--lib/librte_hash/rte_cuckoo_hash.c258
-rw-r--r--lib/librte_hash/rte_cuckoo_hash.h219
-rw-r--r--lib/librte_hash/rte_cuckoo_hash_x86.h193
-rw-r--r--lib/librte_hash/rte_hash.h5
-rw-r--r--lib/librte_ip_frag/Makefile7
-rw-r--r--lib/librte_ip_frag/ip_frag_common.h12
-rw-r--r--lib/librte_ip_frag/rte_ipv4_fragmentation.c2
-rw-r--r--lib/librte_ip_frag/rte_ipv4_reassembly.c8
-rw-r--r--lib/librte_ip_frag/rte_ipv6_fragmentation.c2
-rw-r--r--lib/librte_ip_frag/rte_ipv6_reassembly.c8
-rw-r--r--lib/librte_ivshmem/Makefile4
-rw-r--r--lib/librte_ivshmem/rte_ivshmem.c32
-rw-r--r--lib/librte_jobstats/rte_jobstats.h2
-rw-r--r--lib/librte_kni/Makefile5
-rw-r--r--lib/librte_kni/rte_kni.c12
-rw-r--r--lib/librte_kni/rte_kni.h11
-rw-r--r--lib/librte_lpm/rte_lpm.c1
-rw-r--r--lib/librte_lpm/rte_lpm6.c4
-rw-r--r--lib/librte_lpm/rte_lpm6.h2
-rw-r--r--lib/librte_mbuf/rte_mbuf.c46
-rw-r--r--lib/librte_mbuf/rte_mbuf.h180
-rw-r--r--lib/librte_mempool/Makefile8
-rw-r--r--lib/librte_mempool/rte_dom0_mempool.c133
-rw-r--r--lib/librte_mempool/rte_mempool.c1173
-rw-r--r--lib/librte_mempool/rte_mempool.h1136
-rw-r--r--lib/librte_mempool/rte_mempool_ops.c151
-rw-r--r--lib/librte_mempool/rte_mempool_ring.c161
-rw-r--r--lib/librte_mempool/rte_mempool_stack.c147
-rw-r--r--lib/librte_mempool/rte_mempool_version.map29
-rw-r--r--lib/librte_pdump/Makefile57
-rw-r--r--lib/librte_pdump/rte_pdump.c959
-rw-r--r--lib/librte_pdump/rte_pdump.h216
-rw-r--r--lib/librte_pdump/rte_pdump_version.map13
-rw-r--r--lib/librte_pipeline/Makefile5
-rw-r--r--lib/librte_port/Makefile12
-rw-r--r--lib/librte_port/rte_port_kni.c545
-rw-r--r--lib/librte_port/rte_port_kni.h95
-rw-r--r--lib/librte_port/rte_port_source_sink.c14
-rw-r--r--lib/librte_port/rte_port_source_sink.h3
-rw-r--r--lib/librte_port/rte_port_version.map11
-rw-r--r--lib/librte_power/guest_channel.c6
-rw-r--r--lib/librte_power/rte_power_kvm_vm.c3
-rw-r--r--lib/librte_reorder/Makefile1
-rw-r--r--lib/librte_ring/rte_ring.c16
-rw-r--r--lib/librte_sched/Makefile1
-rw-r--r--lib/librte_sched/rte_red.c2
-rw-r--r--lib/librte_sched/rte_red.h25
-rw-r--r--lib/librte_sched/rte_sched.c22
-rw-r--r--lib/librte_table/rte_table_acl.c2
-rw-r--r--lib/librte_table/rte_table_lpm.c10
-rw-r--r--lib/librte_vhost/Makefile3
-rw-r--r--lib/librte_vhost/rte_vhost_version.map10
-rw-r--r--lib/librte_vhost/rte_virtio_net.h233
-rw-r--r--lib/librte_vhost/vhost-net.h200
-rw-r--r--lib/librte_vhost/vhost_cuse/vhost-net-cdev.c91
-rw-r--r--lib/librte_vhost/vhost_cuse/virtio-net-cdev.c30
-rw-r--r--lib/librte_vhost/vhost_cuse/virtio-net-cdev.h12
-rw-r--r--lib/librte_vhost/vhost_rxtx.c350
-rw-r--r--lib/librte_vhost/vhost_user/vhost-net-user.c450
-rw-r--r--lib/librte_vhost/vhost_user/vhost-net-user.h8
-rw-r--r--lib/librte_vhost/vhost_user/virtio-net-user.c102
-rw-r--r--lib/librte_vhost/vhost_user/virtio-net-user.h18
-rw-r--r--lib/librte_vhost/virtio-net.c239
-rw-r--r--mk/arch/arm/rte.vars.mk5
-rw-r--r--mk/arch/arm64/rte.vars.mk5
-rw-r--r--mk/arch/i686/rte.vars.mk5
-rw-r--r--mk/arch/ppc_64/rte.vars.mk5
-rw-r--r--mk/arch/x86_64/rte.vars.mk5
-rw-r--r--mk/arch/x86_x32/rte.vars.mk5
-rw-r--r--mk/exec-env/linuxapp/rte.vars.mk3
-rw-r--r--mk/machine/armv7a/rte.vars.mk2
-rw-r--r--mk/machine/dpaa2/rte.vars.mk60
-rw-r--r--mk/rte.app.mk149
-rw-r--r--mk/rte.lib.mk29
-rw-r--r--mk/rte.sdkinstall.mk4
-rw-r--r--mk/rte.sdktest.mk14
-rwxr-xr-xscripts/check-git-log.sh24
-rwxr-xr-xscripts/checkpatches.sh47
-rwxr-xr-xscripts/merge-maps.sh29
-rwxr-xr-xscripts/test-build.sh25
-rwxr-xr-xscripts/test-null.sh1
-rwxr-xr-xtools/dpdk_nic_bind.py6
707 files changed, 119371 insertions, 20950 deletions
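
The headline addition in this import is DPDK's new packet-capture framework: lib/librte_pdump plus the dpdk_pdump secondary-process tool under app/pdump/, both of which appear in the diff below. The tool attaches to a running primary process and is invoked along the lines of dpdk_pdump -- --pdump 'port=0,queue=*,rx-dev=/tmp/rx.pcap' (path illustrative). As orientation, a minimal sketch (not part of the commit) of the underlying rte_pdump calls, assuming a secondary process with EAL already initialized; names and sizes are illustrative, matching the tool's defaults:

/*
 * Minimal sketch of the rte_pdump API used by app/pdump/main.c below.
 * Assumes a secondary process whose EAL is already initialized; the
 * ring/pool names and sizes are illustrative.
 */
#include <stdint.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_lcore.h>
#include <rte_pdump.h>

static int
capture_port_rx(uint8_t port)
{
	/* the primary process copies captured mbufs into this ring/pool */
	struct rte_ring *ring = rte_ring_create("pdump_ring", 16384,
			rte_socket_id(), 0);
	struct rte_mempool *pool = rte_pktmbuf_pool_create("pdump_pool",
			65535, 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());

	if (ring == NULL || pool == NULL)
		return -1;

	/* same signature as the calls in enable_pdump() below; the final
	 * filter argument is unused in 16.07 and passed as NULL */
	return rte_pdump_enable(port, RTE_PDUMP_ALL_QUEUES,
			RTE_PDUMP_FLAG_RX, ring, pool, NULL);
}

Teardown goes through rte_pdump_disable() or rte_pdump_disable_by_deviceid(), exactly as disable_pdump() in main.c does.
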
diff --git a/MAINTAINERS b/MAINTAINERS
index 1953ea2f..a59191ea 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -56,7 +56,6 @@ F: scripts/auto-config-h.sh
F: scripts/depdirs-rule.sh
F: scripts/gen-build-mk.sh
F: scripts/gen-config-h.sh
-F: scripts/merge-maps.sh
F: scripts/relpath.sh
F: doc/build-sdk-quick.txt
F: doc/guides/prog_guide/build_app.rst
@@ -93,6 +92,7 @@ F: app/test/test_interrupts.c
F: app/test/test_logs.c
F: app/test/test_memcpy*
F: app/test/test_pci.c
+F: app/test/test_pci_sysfs/
F: app/test/test_per_lcore.c
F: app/test/test_prefetch.c
F: app/test/test_rwlock.c
@@ -142,6 +142,7 @@ F: lib/librte_eal/common/include/arch/arm/*_64.h
F: lib/librte_acl/acl_run_neon.*
F: lib/librte_lpm/rte_lpm_neon.h
F: lib/librte_hash/rte*_arm64.h
+F: drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
EZchip TILE-Gx
M: Zhigang Lu <zlu@ezchip.com>
@@ -180,10 +181,8 @@ Linux Xen
F: lib/librte_eal/linuxapp/xen_dom0/
F: lib/librte_eal/linuxapp/eal/*xen*
F: lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
-F: lib/librte_mempool/rte_dom0_mempool.c
F: drivers/net/xenvirt/
F: doc/guides/xen/
-F: app/test-pmd/mempool_*
F: examples/vhost_xen/
FreeBSD EAL (with overlaps)
@@ -270,6 +269,17 @@ M: Evgeny Schemeilin <evgenys@amazon.com>
F: drivers/net/ena/
F: doc/guides/nics/ena.rst
+Broadcom bnxt
+M: Stephen Hurd <stephen.hurd@broadcom.com>
+F: drivers/net/bnxt/
+F: doc/guides/nics/bnxt.rst
+
+Cavium ThunderX nicvf
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
+F: drivers/net/thunderx/
+F: doc/guides/nics/thunderx.rst
+
Chelsio cxgbe
M: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
F: drivers/net/cxgbe/
@@ -332,6 +342,13 @@ M: Rasesh Mody <rasesh.mody@qlogic.com>
F: drivers/net/bnx2x/
F: doc/guides/nics/bnx2x.rst
+QLogic qede PMD
+M: Harish Patil <harish.patil@qlogic.com>
+M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Sony Chacko <sony.chacko@qlogic.com>
+F: drivers/net/qede/
+F: doc/guides/nics/qede.rst
+
RedHat virtio
M: Huawei Xie <huawei.xie@intel.com>
M: Yuanhan Liu <yuanhan.liu@linux.intel.com>
@@ -391,6 +408,11 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: drivers/crypto/snow3g/
F: doc/guides/cryptodevs/snow3g.rst
+KASUMI PMD
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
+F: drivers/crypto/kasumi/
+F: doc/guides/cryptodevs/kasumi.rst
+
Null Crypto PMD
M: Declan Doherty <declan.doherty@intel.com>
F: drivers/crypto/null/
@@ -437,6 +459,13 @@ F: app/test/test_sched.c
F: examples/qos_sched/
F: doc/guides/sample_app_ug/qos_scheduler.rst
+Packet capture
+M: Reshma Pattan <reshma.pattan@intel.com>
+F: lib/librte_pdump/
+F: doc/guides/prog_guide/pdump_lib.rst
+F: app/pdump/
+F: doc/guides/sample_app_ug/pdump.rst
+
Packet Framework
----------------
@@ -550,9 +579,11 @@ F: app/test/commands.c
F: app/test/packet_burst_generator.c
F: app/test/packet_burst_generator.h
F: app/test/process.h
+F: app/test/resource.*
F: app/test/test.c
F: app/test/test.h
F: app/test/test_pmd_perf.c
+F: app/test/test_resource.c
F: app/test/virtual_pmd.c
F: app/test/virtual_pmd.h
diff --git a/app/Makefile b/app/Makefile
index 1151e094..30ec292a 100644
--- a/app/Makefile
+++ b/app/Makefile
@@ -37,5 +37,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test-pipeline
DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_test
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += proc_info
+DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += pdump
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/app/pdump/Makefile b/app/pdump/Makefile
new file mode 100644
index 00000000..d85bb08e
--- /dev/null
+++ b/app/pdump/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(CONFIG_RTE_LIBRTE_PDUMP),y)
+
+APP = dpdk_pdump
+
+CFLAGS += $(WERROR_FLAGS)
+
+# all source are stored in SRCS-y
+
+SRCS-y := main.c
+
+# this application needs libraries first
+DEPDIRS-y += lib
+
+include $(RTE_SDK)/mk/rte.app.mk
+
+endif
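
The main.c added below parses its --pdump argument with the librte_kvargs helpers: rte_kvargs_parse() validates the keys, rte_kvargs_count() checks how often each key occurs, and rte_kvargs_process() runs a callback per key=value pair. A minimal sketch (not part of the commit) of that pattern; the "speed" key and its handler are hypothetical, for illustration only:

/*
 * Minimal sketch of the rte_kvargs pattern used by parse_pdump() in
 * main.c below. The "speed" key and handler are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_kvargs.h>

static const char *valid_keys[] = { "speed", NULL };

/* called once for every matching key=value pair in the kvlist */
static int
handle_speed(const char *key __rte_unused, const char *value,
		void *extra_args)
{
	*(unsigned long *)extra_args = strtoul(value, NULL, 10);
	return 0;
}

static int
parse_example(const char *arg)	/* e.g. arg = "speed=1000" */
{
	unsigned long speed = 0;
	struct rte_kvargs *kvlist = rte_kvargs_parse(arg, valid_keys);

	if (kvlist == NULL)
		return -1;	/* unknown key or malformed input */
	if (rte_kvargs_count(kvlist, "speed") == 1 &&
	    rte_kvargs_process(kvlist, "speed", &handle_speed, &speed) == 0)
		printf("parsed speed=%lu\n", speed);
	rte_kvargs_free(kvlist);
	return 0;
}

parse_pdump() applies the same three steps to every key (port/device_id, queue, rx-dev/tx-dev and the optional sizing keys), delegating range checks on numeric values to parse_uint_value().
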
diff --git a/app/pdump/main.c b/app/pdump/main.c
new file mode 100644
index 00000000..2087c159
--- /dev/null
+++ b/app/pdump/main.c
@@ -0,0 +1,844 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <net/if.h>
+
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_errno.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_mempool.h>
+#include <rte_ring.h>
+#include <rte_pdump.h>
+
+#define PDUMP_PORT_ARG "port"
+#define PDUMP_PCI_ARG "device_id"
+#define PDUMP_QUEUE_ARG "queue"
+#define PDUMP_DIR_ARG "dir"
+#define PDUMP_RX_DEV_ARG "rx-dev"
+#define PDUMP_TX_DEV_ARG "tx-dev"
+#define PDUMP_RING_SIZE_ARG "ring-size"
+#define PDUMP_MSIZE_ARG "mbuf-size"
+#define PDUMP_NUM_MBUFS_ARG "total-num-mbufs"
+
+#define VDEV_PCAP "eth_pcap_%s_%d,tx_pcap=%s"
+#define VDEV_IFACE "eth_pcap_%s_%d,tx_iface=%s"
+#define TX_STREAM_SIZE 64
+
+#define MP_NAME "pdump_pool_%d"
+
+#define RX_RING "rx_ring_%d"
+#define TX_RING "tx_ring_%d"
+
+#define RX_STR "rx"
+#define TX_STR "tx"
+
+/* Maximum number of --pdump capture tuples accepted on the command line. */
+#define APP_ARG_TCPDUMP_MAX_TUPLES 54
+#define MBUF_POOL_CACHE_SIZE 250
+#define TX_DESC_PER_QUEUE 512
+#define RX_DESC_PER_QUEUE 128
+#define MBUFS_PER_POOL 65535
+#define MAX_LONG_OPT_SZ 64
+#define RING_SIZE 16384
+#define SIZE 256
+#define BURST_SIZE 32
+#define NUM_VDEVS 2
+
+#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
+/* true if x is a power of 2 */
+#define POWEROF2(x) ((((x)-1) & (x)) == 0)
+
+enum pdump_en_dis {
+ DISABLE = 1,
+ ENABLE = 2
+};
+
+enum pcap_stream {
+ IFACE = 1,
+ PCAP = 2
+};
+
+enum pdump_by {
+ PORT_ID = 1,
+ DEVICE_ID = 2
+};
+
+const char *valid_pdump_arguments[] = {
+ PDUMP_PORT_ARG,
+ PDUMP_PCI_ARG,
+ PDUMP_QUEUE_ARG,
+ PDUMP_DIR_ARG,
+ PDUMP_RX_DEV_ARG,
+ PDUMP_TX_DEV_ARG,
+ PDUMP_RING_SIZE_ARG,
+ PDUMP_MSIZE_ARG,
+ PDUMP_NUM_MBUFS_ARG,
+ NULL
+};
+
+struct pdump_stats {
+ uint64_t dequeue_pkts;
+ uint64_t tx_pkts;
+ uint64_t freed_pkts;
+};
+
+struct pdump_tuples {
+ /* cli params */
+ uint8_t port;
+ char *device_id;
+ uint16_t queue;
+ char rx_dev[TX_STREAM_SIZE];
+ char tx_dev[TX_STREAM_SIZE];
+ uint32_t ring_size;
+ uint16_t mbuf_data_size;
+ uint32_t total_num_mbufs;
+
+ /* params for library API call */
+ uint32_t dir;
+ struct rte_mempool *mp;
+ struct rte_ring *rx_ring;
+ struct rte_ring *tx_ring;
+
+ /* params for packet dumping */
+ enum pdump_by dump_by_type;
+ int rx_vdev_id;
+ int tx_vdev_id;
+ enum pcap_stream rx_vdev_stream_type;
+ enum pcap_stream tx_vdev_stream_type;
+ bool single_pdump_dev;
+
+ /* stats */
+ struct pdump_stats stats;
+} __rte_cache_aligned;
+static struct pdump_tuples pdump_t[APP_ARG_TCPDUMP_MAX_TUPLES];
+
+struct parse_val {
+ uint64_t min;
+ uint64_t max;
+ uint64_t val;
+};
+
+int num_tuples;
+static struct rte_eth_conf port_conf_default;
+volatile uint8_t quit_signal;
+
+/* display usage */
+static void
+pdump_usage(const char *prgname)
+{
+ printf("usage: %s [EAL options] -- --pdump "
+ "'(port=<port id> | device_id=<pci id or vdev name>),"
+ "(queue=<queue_id>),"
+ "(rx-dev=<iface or pcap file> |"
+ " tx-dev=<iface or pcap file>,"
+ "[ring-size=<ring size>default:16384],"
+ "[mbuf-size=<mbuf data size>default:2176],"
+ "[total-num-mbufs=<number of mbufs>default:65535]"
+ "'\n",
+ prgname);
+}
+
+static int
+parse_device_id(const char *key __rte_unused, const char *value,
+ void *extra_args)
+{
+ struct pdump_tuples *pt = extra_args;
+
+ pt->device_id = strdup(value);
+ pt->dump_by_type = DEVICE_ID;
+
+ return 0;
+}
+
+static int
+parse_queue(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ unsigned long n;
+ struct pdump_tuples *pt = extra_args;
+
+ if (!strcmp(value, "*"))
+ pt->queue = RTE_PDUMP_ALL_QUEUES;
+ else {
+ n = strtoul(value, NULL, 10);
+ pt->queue = (uint16_t) n;
+ }
+ return 0;
+}
+
+static int
+parse_rxtxdev(const char *key, const char *value, void *extra_args)
+{
+
+ struct pdump_tuples *pt = extra_args;
+
+ if (!strcmp(key, PDUMP_RX_DEV_ARG)) {
+ snprintf(pt->rx_dev, sizeof(pt->rx_dev), "%s", value);
+ /* identify the rx stream type for pcap vdev */
+ if (if_nametoindex(pt->rx_dev))
+ pt->rx_vdev_stream_type = IFACE;
+ } else if (!strcmp(key, PDUMP_TX_DEV_ARG)) {
+ snprintf(pt->tx_dev, sizeof(pt->tx_dev), "%s", value);
+ /* identify the tx stream type for pcap vdev */
+ if (if_nametoindex(pt->tx_dev))
+ pt->tx_vdev_stream_type = IFACE;
+ } else {
+ printf("invalid dev type %s, must be rx or tx\n", value);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+parse_uint_value(const char *key, const char *value, void *extra_args)
+{
+ struct parse_val *v;
+ unsigned long t;
+ char *end;
+ int ret = 0;
+
+ errno = 0;
+ v = extra_args;
+ t = strtoul(value, &end, 10);
+
+ if (errno != 0 || end[0] != 0 || t < v->min || t > v->max) {
+ printf("invalid value:\"%s\" for key:\"%s\", "
+ "value must be >= %"PRIu64" and <= %"PRIu64"\n",
+ value, key, v->min, v->max);
+ ret = -EINVAL;
+ }
+ if (!strcmp(key, PDUMP_RING_SIZE_ARG) && !POWEROF2(t)) {
+ printf("invalid value:\"%s\" for key:\"%s\", "
+ "value must be power of 2\n", value, key);
+ ret = -EINVAL;
+ }
+
+ if (ret != 0)
+ return ret;
+
+ v->val = t;
+ return 0;
+}
+
+static int
+parse_pdump(const char *optarg)
+{
+ struct rte_kvargs *kvlist;
+ int ret = 0, cnt1, cnt2;
+ struct pdump_tuples *pt;
+ struct parse_val v = {0};
+
+ pt = &pdump_t[num_tuples];
+
+ /* initial check for invalid arguments */
+ kvlist = rte_kvargs_parse(optarg, valid_pdump_arguments);
+ if (kvlist == NULL) {
+ printf("--pdump=\"%s\": invalid argument passed\n", optarg);
+ return -1;
+ }
+
+ /* port/device_id parsing and validation */
+ cnt1 = rte_kvargs_count(kvlist, PDUMP_PORT_ARG);
+ cnt2 = rte_kvargs_count(kvlist, PDUMP_PCI_ARG);
+ if (!((cnt1 == 1 && cnt2 == 0) || (cnt1 == 0 && cnt2 == 1))) {
+ printf("--pdump=\"%s\": must have either port or "
+ "device_id argument\n", optarg);
+ ret = -1;
+ goto free_kvlist;
+ } else if (cnt1 == 1) {
+ v.min = 0;
+ v.max = RTE_MAX_ETHPORTS-1;
+ ret = rte_kvargs_process(kvlist, PDUMP_PORT_ARG,
+ &parse_uint_value, &v);
+ if (ret < 0)
+ goto free_kvlist;
+ pt->port = (uint8_t) v.val;
+ pt->dump_by_type = PORT_ID;
+ } else if (cnt2 == 1) {
+ ret = rte_kvargs_process(kvlist, PDUMP_PCI_ARG,
+ &parse_device_id, pt);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+ /* queue parsing and validation */
+ cnt1 = rte_kvargs_count(kvlist, PDUMP_QUEUE_ARG);
+ if (cnt1 != 1) {
+ printf("--pdump=\"%s\": must have queue argument\n", optarg);
+ ret = -1;
+ goto free_kvlist;
+ }
+ ret = rte_kvargs_process(kvlist, PDUMP_QUEUE_ARG, &parse_queue, pt);
+ if (ret < 0)
+ goto free_kvlist;
+
+ /* rx-dev and tx-dev parsing and validation */
+ cnt1 = rte_kvargs_count(kvlist, PDUMP_RX_DEV_ARG);
+ cnt2 = rte_kvargs_count(kvlist, PDUMP_TX_DEV_ARG);
+ if (cnt1 == 0 && cnt2 == 0) {
+ printf("--pdump=\"%s\": must have either rx-dev or "
+ "tx-dev argument\n", optarg);
+ ret = -1;
+ goto free_kvlist;
+ } else if (cnt1 == 1 && cnt2 == 1) {
+ ret = rte_kvargs_process(kvlist, PDUMP_RX_DEV_ARG,
+ &parse_rxtxdev, pt);
+ if (ret < 0)
+ goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, PDUMP_TX_DEV_ARG,
+ &parse_rxtxdev, pt);
+ if (ret < 0)
+ goto free_kvlist;
+ /* if captured packets have to be sent to the same vdev */
+ if (!strcmp(pt->rx_dev, pt->tx_dev))
+ pt->single_pdump_dev = true;
+ pt->dir = RTE_PDUMP_FLAG_RXTX;
+ } else if (cnt1 == 1) {
+ ret = rte_kvargs_process(kvlist, PDUMP_RX_DEV_ARG,
+ &parse_rxtxdev, pt);
+ if (ret < 0)
+ goto free_kvlist;
+ pt->dir = RTE_PDUMP_FLAG_RX;
+ } else if (cnt2 == 1) {
+ ret = rte_kvargs_process(kvlist, PDUMP_TX_DEV_ARG,
+ &parse_rxtxdev, pt);
+ if (ret < 0)
+ goto free_kvlist;
+ pt->dir = RTE_PDUMP_FLAG_TX;
+ }
+
+ /* optional */
+ /* ring_size parsing and validation */
+ cnt1 = rte_kvargs_count(kvlist, PDUMP_RING_SIZE_ARG);
+ if (cnt1 == 1) {
+ v.min = 2;
+ v.max = RTE_RING_SZ_MASK-1;
+ ret = rte_kvargs_process(kvlist, PDUMP_RING_SIZE_ARG,
+ &parse_uint_value, &v);
+ if (ret < 0)
+ goto free_kvlist;
+ pt->ring_size = (uint32_t) v.val;
+ } else
+ pt->ring_size = RING_SIZE;
+
+ /* mbuf_data_size parsing and validation */
+ cnt1 = rte_kvargs_count(kvlist, PDUMP_MSIZE_ARG);
+ if (cnt1 == 1) {
+ v.min = 1;
+ v.max = UINT16_MAX;
+ ret = rte_kvargs_process(kvlist, PDUMP_MSIZE_ARG,
+ &parse_uint_value, &v);
+ if (ret < 0)
+ goto free_kvlist;
+ pt->mbuf_data_size = (uint16_t) v.val;
+ } else
+ pt->mbuf_data_size = RTE_MBUF_DEFAULT_BUF_SIZE;
+
+ /* total_num_mbufs parsing and validation */
+ cnt1 = rte_kvargs_count(kvlist, PDUMP_NUM_MBUFS_ARG);
+ if (cnt1 == 1) {
+ v.min = 1025;
+ v.max = UINT16_MAX;
+ ret = rte_kvargs_process(kvlist, PDUMP_NUM_MBUFS_ARG,
+ &parse_uint_value, &v);
+ if (ret < 0)
+ goto free_kvlist;
+ pt->total_num_mbufs = (uint16_t) v.val;
+ } else
+ pt->total_num_mbufs = MBUFS_PER_POOL;
+
+ num_tuples++;
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+launch_args_parse(int argc, char **argv, char *prgname)
+{
+ int opt, ret;
+ int option_index;
+ static struct option long_option[] = {
+ {"pdump", 1, 0, 0},
+ {NULL, 0, 0, 0}
+ };
+
+ if (argc == 1)
+ pdump_usage(prgname);
+
+ /* Parse command line */
+ while ((opt = getopt_long(argc, argv, " ",
+ long_option, &option_index)) != EOF) {
+ switch (opt) {
+ case 0:
+ if (!strncmp(long_option[option_index].name, "pdump",
+ MAX_LONG_OPT_SZ)) {
+ ret = parse_pdump(optarg);
+ if (ret) {
+ pdump_usage(prgname);
+ return -1;
+ }
+ }
+ break;
+ default:
+ pdump_usage(prgname);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+print_pdump_stats(void)
+{
+ int i;
+ struct pdump_tuples *pt;
+
+ for (i = 0; i < num_tuples; i++) {
+ printf("##### PDUMP DEBUG STATS #####\n");
+ pt = &pdump_t[i];
+ printf(" -packets dequeued: %"PRIu64"\n",
+ pt->stats.dequeue_pkts);
+ printf(" -packets transmitted to vdev: %"PRIu64"\n",
+ pt->stats.tx_pkts);
+ printf(" -packets freed: %"PRIu64"\n",
+ pt->stats.freed_pkts);
+ }
+}
+
+static inline void
+disable_pdump(struct pdump_tuples *pt)
+{
+ if (pt->dump_by_type == DEVICE_ID)
+ rte_pdump_disable_by_deviceid(pt->device_id, pt->queue,
+ pt->dir);
+ else if (pt->dump_by_type == PORT_ID)
+ rte_pdump_disable(pt->port, pt->queue, pt->dir);
+}
+
+static inline void
+pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)
+{
+ /* write the captured packets of the port to the vdev for pdump */
+ struct rte_mbuf *rxtx_bufs[BURST_SIZE];
+
+ /* first dequeue packets from ring of primary process */
+ const uint16_t nb_in_deq = rte_ring_dequeue_burst(ring,
+ (void *)rxtx_bufs, BURST_SIZE);
+ stats->dequeue_pkts += nb_in_deq;
+
+ if (nb_in_deq) {
+ /* then send them out on the vdev */
+ uint16_t nb_in_txd = rte_eth_tx_burst(
+ vdev_id,
+ 0, rxtx_bufs, nb_in_deq);
+ stats->tx_pkts += nb_in_txd;
+
+ if (unlikely(nb_in_txd < nb_in_deq)) {
+ do {
+ rte_pktmbuf_free(rxtx_bufs[nb_in_txd]);
+ stats->freed_pkts++;
+ } while (++nb_in_txd < nb_in_deq);
+ }
+ }
+}
+
+static void
+free_ring_data(struct rte_ring *ring, uint8_t vdev_id,
+ struct pdump_stats *stats)
+{
+ while (rte_ring_count(ring))
+ pdump_rxtx(ring, vdev_id, stats);
+}
+
+static void
+cleanup_pdump_resources(void)
+{
+ int i;
+ struct pdump_tuples *pt;
+
+ /* disable pdump and free the pdump_tuple resources */
+ for (i = 0; i < num_tuples; i++) {
+ pt = &pdump_t[i];
+
+ /* remove callbacks */
+ disable_pdump(pt);
+
+ /*
+ * transmit the rest of the enqueued packets of the rings to the
+ * vdev, in order to release the mbufs back to the mempool.
+ */
+ if (pt->dir & RTE_PDUMP_FLAG_RX)
+ free_ring_data(pt->rx_ring, pt->rx_vdev_id, &pt->stats);
+ if (pt->dir & RTE_PDUMP_FLAG_TX)
+ free_ring_data(pt->tx_ring, pt->tx_vdev_id, &pt->stats);
+
+ if (pt->device_id)
+ free(pt->device_id);
+
+ /* free the rings */
+ if (pt->rx_ring)
+ rte_ring_free(pt->rx_ring);
+ if (pt->tx_ring)
+ rte_ring_free(pt->tx_ring);
+ }
+}
+
+static void
+signal_handler(int sig_num)
+{
+ if (sig_num == SIGINT) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ sig_num);
+ quit_signal = 1;
+ }
+}
+
+static inline int
+configure_vdev(uint8_t port_id)
+{
+ struct ether_addr addr;
+ const uint16_t rxRings = 0, txRings = 1;
+ const uint8_t nb_ports = rte_eth_dev_count();
+ int ret;
+ uint16_t q;
+
+ if (port_id >= nb_ports)
+ return -1;
+
+ ret = rte_eth_dev_configure(port_id, rxRings, txRings,
+ &port_conf_default);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE, "dev config failed\n");
+
+ for (q = 0; q < txRings; q++) {
+ ret = rte_eth_tx_queue_setup(port_id, q, TX_DESC_PER_QUEUE,
+ rte_eth_dev_socket_id(port_id), NULL);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "queue setup failed\n");
+ }
+
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "dev start failed\n");
+
+ rte_eth_macaddr_get(port_id, &addr);
+ printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
+ " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
+ (unsigned)port_id,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ rte_eth_promiscuous_enable(port_id);
+
+ return 0;
+}
+
+static void
+create_mp_ring_vdev(void)
+{
+ int i;
+ uint8_t portid;
+ struct pdump_tuples *pt = NULL;
+ struct rte_mempool *mbuf_pool = NULL;
+ char vdev_args[SIZE];
+ char ring_name[SIZE];
+ char mempool_name[SIZE];
+
+ for (i = 0; i < num_tuples; i++) {
+ pt = &pdump_t[i];
+ snprintf(mempool_name, SIZE, MP_NAME, i);
+ mbuf_pool = rte_mempool_lookup(mempool_name);
+ if (mbuf_pool == NULL) {
+ /* create mempool */
+ mbuf_pool = rte_pktmbuf_pool_create(mempool_name,
+ pt->total_num_mbufs,
+ MBUF_POOL_CACHE_SIZE, 0,
+ pt->mbuf_data_size,
+ rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Mempool creation failed: %s\n",
+ rte_strerror(rte_errno));
+ }
+ pt->mp = mbuf_pool;
+
+ if (pt->dir == RTE_PDUMP_FLAG_RXTX) {
+ /* if captured packets have to be sent to the same vdev */
+ /* create rx_ring */
+ snprintf(ring_name, SIZE, RX_RING, i);
+ pt->rx_ring = rte_ring_create(ring_name, pt->ring_size,
+ rte_socket_id(), 0);
+ if (pt->rx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "%s:%s:%d\n",
+ rte_strerror(rte_errno),
+ __func__, __LINE__);
+
+ /* create tx_ring */
+ snprintf(ring_name, SIZE, TX_RING, i);
+ pt->tx_ring = rte_ring_create(ring_name, pt->ring_size,
+ rte_socket_id(), 0);
+ if (pt->tx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "%s:%s:%d\n",
+ rte_strerror(rte_errno),
+ __func__, __LINE__);
+
+ /* create vdevs */
+ (pt->rx_vdev_stream_type == IFACE) ?
+ snprintf(vdev_args, SIZE, VDEV_IFACE, RX_STR, i,
+ pt->rx_dev) :
+ snprintf(vdev_args, SIZE, VDEV_PCAP, RX_STR, i,
+ pt->rx_dev);
+ if (rte_eth_dev_attach(vdev_args, &portid) < 0)
+ rte_exit(EXIT_FAILURE,
+ "vdev creation failed:%s:%d\n",
+ __func__, __LINE__);
+ pt->rx_vdev_id = portid;
+
+ /* configure vdev */
+ configure_vdev(pt->rx_vdev_id);
+
+ if (pt->single_pdump_dev)
+ pt->tx_vdev_id = portid;
+ else {
+ (pt->tx_vdev_stream_type == IFACE) ?
+ snprintf(vdev_args, SIZE, VDEV_IFACE, TX_STR, i,
+ pt->tx_dev) :
+ snprintf(vdev_args, SIZE, VDEV_PCAP, TX_STR, i,
+ pt->tx_dev);
+ if (rte_eth_dev_attach(vdev_args, &portid) < 0)
+ rte_exit(EXIT_FAILURE,
+ "vdev creation failed:"
+ "%s:%d\n", __func__, __LINE__);
+ pt->tx_vdev_id = portid;
+
+ /* configure vdev */
+ configure_vdev(pt->tx_vdev_id);
+ }
+ } else if (pt->dir == RTE_PDUMP_FLAG_RX) {
+
+ /* create rx_ring */
+ snprintf(ring_name, SIZE, RX_RING, i);
+ pt->rx_ring = rte_ring_create(ring_name, pt->ring_size,
+ rte_socket_id(), 0);
+ if (pt->rx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n",
+ rte_strerror(rte_errno));
+
+ (pt->rx_vdev_stream_type == IFACE) ?
+ snprintf(vdev_args, SIZE, VDEV_IFACE, RX_STR, i,
+ pt->rx_dev) :
+ snprintf(vdev_args, SIZE, VDEV_PCAP, RX_STR, i,
+ pt->rx_dev);
+ if (rte_eth_dev_attach(vdev_args, &portid) < 0)
+ rte_exit(EXIT_FAILURE,
+ "vdev creation failed:%s:%d\n",
+ __func__, __LINE__);
+ pt->rx_vdev_id = portid;
+ /* configure vdev */
+ configure_vdev(pt->rx_vdev_id);
+ } else if (pt->dir == RTE_PDUMP_FLAG_TX) {
+
+ /* create tx_ring */
+ snprintf(ring_name, SIZE, TX_RING, i);
+ pt->tx_ring = rte_ring_create(ring_name, pt->ring_size,
+ rte_socket_id(), 0);
+ if (pt->tx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n",
+ rte_strerror(rte_errno));
+
+ (pt->tx_vdev_stream_type == IFACE) ?
+ snprintf(vdev_args, SIZE, VDEV_IFACE, TX_STR, i,
+ pt->tx_dev) :
+ snprintf(vdev_args, SIZE, VDEV_PCAP, TX_STR, i,
+ pt->tx_dev);
+ if (rte_eth_dev_attach(vdev_args, &portid) < 0)
+ rte_exit(EXIT_FAILURE,
+ "vdev creation failed\n");
+ pt->tx_vdev_id = portid;
+
+ /* configure vdev */
+ configure_vdev(pt->tx_vdev_id);
+ }
+ }
+}
+
+static void
+enable_pdump(void)
+{
+ int i;
+ struct pdump_tuples *pt;
+ int ret = 0, ret1 = 0;
+
+ for (i = 0; i < num_tuples; i++) {
+ pt = &pdump_t[i];
+ if (pt->dir == RTE_PDUMP_FLAG_RXTX) {
+ if (pt->dump_by_type == DEVICE_ID) {
+ ret = rte_pdump_enable_by_deviceid(
+ pt->device_id,
+ pt->queue,
+ RTE_PDUMP_FLAG_RX,
+ pt->rx_ring,
+ pt->mp, NULL);
+				ret1 = rte_pdump_enable_by_deviceid(
+ pt->device_id,
+ pt->queue,
+ RTE_PDUMP_FLAG_TX,
+ pt->tx_ring,
+ pt->mp, NULL);
+ } else if (pt->dump_by_type == PORT_ID) {
+ ret = rte_pdump_enable(pt->port, pt->queue,
+ RTE_PDUMP_FLAG_RX,
+ pt->rx_ring, pt->mp, NULL);
+ ret1 = rte_pdump_enable(pt->port, pt->queue,
+ RTE_PDUMP_FLAG_TX,
+ pt->tx_ring, pt->mp, NULL);
+ }
+ } else if (pt->dir == RTE_PDUMP_FLAG_RX) {
+ if (pt->dump_by_type == DEVICE_ID)
+ ret = rte_pdump_enable_by_deviceid(
+ pt->device_id,
+ pt->queue,
+ pt->dir, pt->rx_ring,
+ pt->mp, NULL);
+ else if (pt->dump_by_type == PORT_ID)
+ ret = rte_pdump_enable(pt->port, pt->queue,
+ pt->dir,
+ pt->rx_ring, pt->mp, NULL);
+ } else if (pt->dir == RTE_PDUMP_FLAG_TX) {
+ if (pt->dump_by_type == DEVICE_ID)
+ ret = rte_pdump_enable_by_deviceid(
+ pt->device_id,
+ pt->queue,
+ pt->dir,
+ pt->tx_ring, pt->mp, NULL);
+ else if (pt->dump_by_type == PORT_ID)
+ ret = rte_pdump_enable(pt->port, pt->queue,
+ pt->dir,
+ pt->tx_ring, pt->mp, NULL);
+ }
+ if (ret < 0 || ret1 < 0) {
+ cleanup_pdump_resources();
+ rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+ }
+ }
+}
+
+static inline void
+dump_packets(void)
+{
+ int i;
+ struct pdump_tuples *pt;
+
+ while (!quit_signal) {
+ for (i = 0; i < num_tuples; i++) {
+ pt = &pdump_t[i];
+ if (pt->dir & RTE_PDUMP_FLAG_RX)
+ pdump_rxtx(pt->rx_ring, pt->rx_vdev_id,
+ &pt->stats);
+ if (pt->dir & RTE_PDUMP_FLAG_TX)
+ pdump_rxtx(pt->tx_ring, pt->tx_vdev_id,
+ &pt->stats);
+ }
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int diag;
+ int ret;
+ int i;
+
+ char c_flag[] = "-c1";
+ char n_flag[] = "-n4";
+ char mp_flag[] = "--proc-type=secondary";
+ char *argp[argc + 3];
+
+ /* catch ctrl-c so we can print on exit */
+ signal(SIGINT, signal_handler);
+
+ argp[0] = argv[0];
+ argp[1] = c_flag;
+ argp[2] = n_flag;
+ argp[3] = mp_flag;
+
+ for (i = 1; i < argc; i++)
+ argp[i + 3] = argv[i];
+
+ argc += 3;
+
+ diag = rte_eal_init(argc, argp);
+ if (diag < 0)
+ rte_panic("Cannot init EAL\n");
+
+ argc -= diag;
+ argv += (diag - 3);
+
+ /* parse app arguments */
+ if (argc > 1) {
+ ret = launch_args_parse(argc, argv, argp[0]);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid argument\n");
+ }
+
+ /* create mempool, ring and vdevs info */
+ create_mp_ring_vdev();
+ enable_pdump();
+ dump_packets();
+
+ cleanup_pdump_resources();
+ /* dump debug stats */
+ print_pdump_stats();
+
+ return 0;
+}
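
The three flags injected above are what make the tool work: pdump must run as
a DPDK secondary process so it can attach to the primary's hugepage memory and
look up its rings and mempools. A minimal sketch of that bootstrap, independent
of the rest of the tool (names here are illustrative, not part of the patch):

	#include <rte_eal.h>
	#include <rte_debug.h>

	int
	main(int argc, char **argv)
	{
		char c_flag[] = "-c1";
		char n_flag[] = "-n4";
		char mp_flag[] = "--proc-type=secondary";
		char *eal_argv[] = { argv[0], c_flag, n_flag, mp_flag };

		(void)argc;
		if (rte_eal_init(4, eal_argv) < 0)
			rte_panic("Cannot init EAL as a secondary process\n");

		/* objects created by the primary are now visible, e.g. via
		 * rte_mempool_lookup() / rte_ring_lookup() */
		return 0;
	}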
diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index 341176db..6dc0bbb8 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -243,11 +243,12 @@ nic_stats_clear(uint8_t port_id)
static void
nic_xstats_display(uint8_t port_id)
{
- struct rte_eth_xstats *xstats;
+ struct rte_eth_xstat_name *xstats_names;
+ struct rte_eth_xstat *xstats;
int len, ret, i;
static const char *nic_stats_border = "########################";
- len = rte_eth_xstats_get(port_id, NULL, 0);
+ len = rte_eth_xstats_get_names(port_id, NULL, 0);
if (len < 0) {
printf("Cannot get xstats count\n");
return;
@@ -258,6 +259,18 @@ nic_xstats_display(uint8_t port_id)
return;
}
+ xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * len);
+ if (xstats_names == NULL) {
+ printf("Cannot allocate memory for xstat names\n");
+ free(xstats);
+ return;
+ }
+	if (len != rte_eth_xstats_get_names(
+			port_id, xstats_names, len)) {
+		printf("Cannot get xstat names\n");
+		free(xstats_names);
+		free(xstats);
+		return;
+	}
+
printf("###### NIC extended statistics for port %-2d #########\n",
port_id);
printf("%s############################\n",
@@ -270,11 +283,14 @@ nic_xstats_display(uint8_t port_id)
}
for (i = 0; i < len; i++)
- printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value);
+ printf("%s: %"PRIu64"\n",
+ xstats_names[i].name,
+ xstats[i].value);
printf("%s############################\n",
nic_stats_border);
free(xstats);
+ free(xstats_names);
}
static void
@@ -327,10 +343,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
-
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* If no port mask was specified*/
if (enabled_port_mask == 0)
enabled_port_mask = 0xffff;
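
The hunk above moves proc_info to the split xstats API introduced in this
release: one call returns the id-to-name table, a second returns the values,
and the two arrays are joined by index. Condensed into a sketch (error
handling trimmed; show_xstats is a hypothetical helper, not part of the
patch):

	#include <stdio.h>
	#include <stdlib.h>
	#include <inttypes.h>
	#include <rte_ethdev.h>

	static void
	show_xstats(uint8_t port_id)
	{
		int n = rte_eth_xstats_get_names(port_id, NULL, 0);
		struct rte_eth_xstat_name *names;
		struct rte_eth_xstat *vals;

		if (n <= 0)
			return;
		names = malloc(sizeof(*names) * n);
		vals = malloc(sizeof(*vals) * n);
		if (names != NULL && vals != NULL &&
		    rte_eth_xstats_get_names(port_id, names, n) == n &&
		    rte_eth_xstats_get(port_id, vals, n) == n) {
			int i;

			for (i = 0; i < n; i++)
				printf("%s: %"PRIu64"\n",
					names[i].name, vals[i].value);
		}
		free(names);
		free(vals);
	}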
diff --git a/app/test-acl/main.c b/app/test-acl/main.c
index 0b0c093c..d3669814 100644
--- a/app/test-acl/main.c
+++ b/app/test-acl/main.c
@@ -901,7 +901,7 @@ search_ip5tuples(__attribute__((unused)) void *arg)
"%s @lcore %u: %" PRIu32 " iterations, %" PRIu64 " pkts, %"
PRIu32 " categories, %" PRIu64 " cycles, %#Lf cycles/pkt\n",
__func__, lcore, i, pkt, config.run_categories,
- tm, (long double)tm / pkt);
+ tm, (pkt == 0) ? 0 : (long double)tm / pkt);
return 0;
}
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index 72426f31..2a0b5a5d 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -50,7 +50,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline.c
SRCS-y += config.c
SRCS-y += iofwd.c
SRCS-y += macfwd.c
-SRCS-y += macfwd-retry.c
SRCS-y += macswap.c
SRCS-y += flowgen.c
SRCS-y += rxonly.c
@@ -58,11 +57,7 @@ SRCS-y += txonly.c
SRCS-y += csumonly.c
SRCS-y += icmpecho.c
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
-SRCS-y += mempool_anon.c
-ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
-CFLAGS_mempool_anon.o := -D_GNU_SOURCE
-endif
CFLAGS_cmdline.o := -D_GNU_SOURCE
# this application needs libraries first
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index c5b94797..b6b61ad3 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -246,8 +246,8 @@ static void cmd_help_long_parsed(void *parsed_result,
" Set number of packets per burst.\n\n"
"set burst tx delay (microseconds) retry (num)\n"
- " Set the transmit delay time and number of retries"
- " in mac_retry forwarding mode.\n\n"
+ " Set the transmit delay time and number of retries,"
+ " effective when retry is enabled.\n\n"
"set txpkts (x[,y]*)\n"
" Set the length of each segment of TXONLY"
@@ -559,13 +559,13 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config all max-pkt-len (value)\n"
" Set the max packet length.\n\n"
- "port config all (crc-strip|rx-cksum|hw-vlan|hw-vlan-filter|"
+ "port config all (crc-strip|scatter|rx-cksum|hw-vlan|hw-vlan-filter|"
"hw-vlan-strip|hw-vlan-extend|drop-en)"
" (on|off)\n"
- " Set crc-strip/rx-checksum/hardware-vlan/drop_en"
+ " Set crc-strip/scatter/rx-checksum/hardware-vlan/drop_en"
" for ports.\n\n"
- "port config all rss (all|ip|tcp|udp|sctp|ether|none)\n"
+ "port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none)\n"
" Set the RSS mode.\n\n"
"port config port-id rss reta (hash,queue)[,(hash,queue)]\n"
@@ -1223,6 +1223,8 @@ cmd_config_rx_tx_parsed(void *parsed_result,
return;
}
+ fwd_config_setup();
+
init_port_config();
cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
@@ -1410,6 +1412,15 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
printf("Unknown parameter\n");
return;
}
+ } else if (!strcmp(res->name, "scatter")) {
+ if (!strcmp(res->value, "on"))
+ rx_mode.enable_scatter = 1;
+ else if (!strcmp(res->value, "off"))
+ rx_mode.enable_scatter = 0;
+ else {
+ printf("Unknown parameter\n");
+ return;
+ }
} else if (!strcmp(res->name, "rx-cksum")) {
if (!strcmp(res->value, "on"))
rx_mode.hw_ip_checksum = 1;
@@ -1487,7 +1498,7 @@ cmdline_parse_token_string_t cmd_config_rx_mode_flag_all =
TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, all, "all");
cmdline_parse_token_string_t cmd_config_rx_mode_flag_name =
TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, name,
- "crc-strip#rx-cksum#hw-vlan#"
+ "crc-strip#scatter#rx-cksum#hw-vlan#"
"hw-vlan-filter#hw-vlan-strip#hw-vlan-extend");
cmdline_parse_token_string_t cmd_config_rx_mode_flag_value =
TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, value,
@@ -1496,7 +1507,7 @@ cmdline_parse_token_string_t cmd_config_rx_mode_flag_value =
cmdline_parse_inst_t cmd_config_rx_mode_flag = {
.f = cmd_config_rx_mode_flag_parsed,
.data = NULL,
- .help_str = "port config all crc-strip|rx-cksum|hw-vlan|"
+ .help_str = "port config all crc-strip|scatter|rx-cksum|hw-vlan|"
"hw-vlan-filter|hw-vlan-strip|hw-vlan-extend on|off",
.tokens = {
(void *)&cmd_config_rx_mode_flag_port,
@@ -1524,6 +1535,7 @@ cmd_config_rss_parsed(void *parsed_result,
{
struct cmd_config_rss *res = parsed_result;
struct rte_eth_rss_conf rss_conf;
+ int diag;
uint8_t i;
if (!strcmp(res->value, "all"))
@@ -1540,6 +1552,14 @@ cmd_config_rss_parsed(void *parsed_result,
rss_conf.rss_hf = ETH_RSS_SCTP;
else if (!strcmp(res->value, "ether"))
rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD;
+ else if (!strcmp(res->value, "port"))
+ rss_conf.rss_hf = ETH_RSS_PORT;
+ else if (!strcmp(res->value, "vxlan"))
+ rss_conf.rss_hf = ETH_RSS_VXLAN;
+ else if (!strcmp(res->value, "geneve"))
+ rss_conf.rss_hf = ETH_RSS_GENEVE;
+ else if (!strcmp(res->value, "nvgre"))
+ rss_conf.rss_hf = ETH_RSS_NVGRE;
else if (!strcmp(res->value, "none"))
rss_conf.rss_hf = 0;
else {
@@ -1547,8 +1567,13 @@ cmd_config_rss_parsed(void *parsed_result,
return;
}
rss_conf.rss_key = NULL;
- for (i = 0; i < rte_eth_dev_count(); i++)
- rte_eth_dev_rss_hash_update(i, &rss_conf);
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ diag = rte_eth_dev_rss_hash_update(i, &rss_conf);
+ if (diag < 0)
+ printf("Configuration of RSS hash at ethernet port %d "
+ "failed with error (%d): %s.\n",
+ i, -diag, strerror(-diag));
+ }
}
cmdline_parse_token_string_t cmd_config_rss_port =
@@ -1561,12 +1586,12 @@ cmdline_parse_token_string_t cmd_config_rss_name =
TOKEN_STRING_INITIALIZER(struct cmd_config_rss, name, "rss");
cmdline_parse_token_string_t cmd_config_rss_value =
TOKEN_STRING_INITIALIZER(struct cmd_config_rss, value,
- "all#ip#tcp#udp#sctp#ether#none");
+ "all#ip#tcp#udp#sctp#ether#port#vxlan#geneve#nvgre#none");
cmdline_parse_inst_t cmd_config_rss = {
.f = cmd_config_rss_parsed,
.data = NULL,
- .help_str = "port config all rss all|ip|tcp|udp|sctp|ether|none",
+ .help_str = "port config all rss all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none",
.tokens = {
(void *)&cmd_config_rss_port,
(void *)&cmd_config_rss_keyword,
@@ -2511,16 +2536,20 @@ static void cmd_set_list_parsed(void *parsed_result,
nb_item = parse_item_list(res->list_of_items, "core",
RTE_MAX_LCORE,
parsed_items.lcorelist, 1);
- if (nb_item > 0)
+ if (nb_item > 0) {
set_fwd_lcores_list(parsed_items.lcorelist, nb_item);
+ fwd_config_setup();
+ }
return;
}
if (!strcmp(res->list_name, "portlist")) {
nb_item = parse_item_list(res->list_of_items, "port",
RTE_MAX_ETHPORTS,
parsed_items.portlist, 1);
- if (nb_item > 0)
+ if (nb_item > 0) {
set_fwd_ports_list(parsed_items.portlist, nb_item);
+ fwd_config_setup();
+ }
}
}
@@ -2564,10 +2593,13 @@ static void cmd_set_mask_parsed(void *parsed_result,
printf("Please stop forwarding first\n");
return;
}
- if (!strcmp(res->mask, "coremask"))
+ if (!strcmp(res->mask, "coremask")) {
set_fwd_lcores_mask(res->hexavalue);
- else if (!strcmp(res->mask, "portmask"))
+ fwd_config_setup();
+ } else if (!strcmp(res->mask, "portmask")) {
set_fwd_ports_mask(res->hexavalue);
+ fwd_config_setup();
+ }
}
cmdline_parse_token_string_t cmd_setmask_set =
@@ -2604,11 +2636,13 @@ static void cmd_set_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_set_result *res = parsed_result;
- if (!strcmp(res->what, "nbport"))
+ if (!strcmp(res->what, "nbport")) {
set_fwd_ports_number(res->value);
- else if (!strcmp(res->what, "nbcore"))
+ fwd_config_setup();
+ } else if (!strcmp(res->what, "nbcore")) {
set_fwd_lcores_number(res->value);
- else if (!strcmp(res->what, "burst"))
+ fwd_config_setup();
+ } else if (!strcmp(res->what, "burst"))
set_nb_pkt_per_burst(res->value);
else if (!strcmp(res->what, "verbose"))
set_verbose_level(res->value);
@@ -2721,6 +2755,74 @@ cmdline_parse_inst_t cmd_set_txsplit = {
},
};
+/* *** CONFIG TX QUEUE FLAGS *** */
+
+struct cmd_config_txqflags_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ cmdline_fixed_string_t all;
+ cmdline_fixed_string_t what;
+ int32_t hexvalue;
+};
+
+static void cmd_config_txqflags_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_txqflags_result *res = parsed_result;
+
+ if (!all_ports_stopped()) {
+ printf("Please stop all ports first\n");
+ return;
+ }
+
+ if (strcmp(res->what, "txqflags")) {
+ printf("Unknown parameter\n");
+ return;
+ }
+
+ if (res->hexvalue >= 0) {
+ txq_flags = res->hexvalue;
+ } else {
+ printf("txqflags must be >= 0\n");
+ return;
+ }
+
+ init_port_config();
+
+ cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_txqflags_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, port,
+ "port");
+cmdline_parse_token_string_t cmd_config_txqflags_config =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, config,
+ "config");
+cmdline_parse_token_string_t cmd_config_txqflags_all =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, all,
+ "all");
+cmdline_parse_token_string_t cmd_config_txqflags_what =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, what,
+ "txqflags");
+cmdline_parse_token_num_t cmd_config_txqflags_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_txqflags_result,
+ hexvalue, INT32);
+
+cmdline_parse_inst_t cmd_config_txqflags = {
+ .f = cmd_config_txqflags_parsed,
+ .data = NULL,
+ .help_str = "port config all txqflags value",
+ .tokens = {
+ (void *)&cmd_config_txqflags_port,
+ (void *)&cmd_config_txqflags_config,
+ (void *)&cmd_config_txqflags_all,
+ (void *)&cmd_config_txqflags_what,
+ (void *)&cmd_config_txqflags_value,
+ NULL,
+ },
+};
+
/* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */
struct cmd_rx_vlan_filter_all_result {
cmdline_fixed_string_t rx_vlan;
@@ -4480,6 +4582,7 @@ static void cmd_set_fwd_mode_parsed(void *parsed_result,
{
struct cmd_set_fwd_mode_result *res = parsed_result;
+ retry_enabled = 0;
set_pkt_forwarding_mode(res->mode);
}
@@ -4525,6 +4628,74 @@ static void cmd_set_fwd_mode_init(void)
token_struct->string_data.str = token;
}
+/* *** SET RETRY FORWARDING MODE *** */
+struct cmd_set_fwd_retry_mode_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t fwd;
+ cmdline_fixed_string_t mode;
+ cmdline_fixed_string_t retry;
+};
+
+static void cmd_set_fwd_retry_mode_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_fwd_retry_mode_result *res = parsed_result;
+
+ retry_enabled = 1;
+ set_pkt_forwarding_mode(res->mode);
+}
+
+cmdline_parse_token_string_t cmd_setfwd_retry_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_retry_mode_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_setfwd_retry_fwd =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_retry_mode_result,
+ fwd, "fwd");
+cmdline_parse_token_string_t cmd_setfwd_retry_mode =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_retry_mode_result,
+ mode,
+ "" /* defined at init */);
+cmdline_parse_token_string_t cmd_setfwd_retry_retry =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_retry_mode_result,
+ retry, "retry");
+
+cmdline_parse_inst_t cmd_set_fwd_retry_mode = {
+ .f = cmd_set_fwd_retry_mode_parsed,
+ .data = NULL,
+ .help_str = NULL, /* defined at init */
+ .tokens = {
+ (void *)&cmd_setfwd_retry_set,
+ (void *)&cmd_setfwd_retry_fwd,
+ (void *)&cmd_setfwd_retry_mode,
+ (void *)&cmd_setfwd_retry_retry,
+ NULL,
+ },
+};
+
+static void cmd_set_fwd_retry_mode_init(void)
+{
+ char *modes, *c;
+ static char token[128];
+ static char help[256];
+ cmdline_parse_token_string_t *token_struct;
+
+ modes = list_pkt_forwarding_retry_modes();
+ snprintf(help, sizeof(help), "set fwd %s retry - "
+ "set packet forwarding mode with retry", modes);
+ cmd_set_fwd_retry_mode.help_str = help;
+
+ /* string token separator is # */
+ for (c = token; *modes != '\0'; modes++)
+ if (*modes == '|')
+ *c++ = '#';
+ else
+ *c++ = *modes;
+ token_struct = (cmdline_parse_token_string_t *)
+ cmd_set_fwd_retry_mode.tokens[2];
+ token_struct->string_data.str = token;
+}
+
/* *** SET BURST TX DELAY TIME RETRY NUMBER *** */
struct cmd_set_burst_tx_retry_result {
cmdline_fixed_string_t set;
@@ -5240,6 +5411,46 @@ cmdline_parse_inst_t cmd_start_tx_first = {
},
};
+/* *** START FORWARDING WITH N TX BURST FIRST *** */
+struct cmd_start_tx_first_n_result {
+ cmdline_fixed_string_t start;
+ cmdline_fixed_string_t tx_first;
+ uint32_t tx_num;
+};
+
+static void
+cmd_start_tx_first_n_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_start_tx_first_n_result *res = parsed_result;
+
+ start_packet_forwarding(res->tx_num);
+}
+
+cmdline_parse_token_string_t cmd_start_tx_first_n_start =
+ TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_n_result,
+ start, "start");
+cmdline_parse_token_string_t cmd_start_tx_first_n_tx_first =
+ TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_n_result,
+ tx_first, "tx_first");
+cmdline_parse_token_num_t cmd_start_tx_first_n_tx_num =
+ TOKEN_NUM_INITIALIZER(struct cmd_start_tx_first_n_result,
+ tx_num, UINT32);
+
+cmdline_parse_inst_t cmd_start_tx_first_n = {
+ .f = cmd_start_tx_first_n_parsed,
+ .data = NULL,
+ .help_str = "start packet forwarding, after sending <num> "
+ "bursts of packets",
+ .tokens = {
+ (void *)&cmd_start_tx_first_n_start,
+ (void *)&cmd_start_tx_first_n_tx_first,
+ (void *)&cmd_start_tx_first_n_tx_num,
+ NULL,
+ },
+};
+
/* *** SET LINK UP *** */
struct cmd_set_link_up_result {
cmdline_fixed_string_t set;
@@ -5336,7 +5547,7 @@ static void cmd_showcfg_parsed(void *parsed_result,
else if (!strcmp(res->what, "cores"))
fwd_lcores_config_display();
else if (!strcmp(res->what, "fwd"))
- fwd_config_display();
+ pkt_fwd_config_display(&cur_fwd_config);
else if (!strcmp(res->what, "txpkts"))
show_tx_pkt_segments();
}
@@ -7191,8 +7402,6 @@ static void cmd_dump_parsed(void *parsed_result,
rte_dump_physmem_layout(stdout);
else if (!strcmp(res->dump, "dump_memzone"))
rte_memzone_dump(stdout);
- else if (!strcmp(res->dump, "dump_log_history"))
- rte_log_dump_history(stdout);
else if (!strcmp(res->dump, "dump_struct_sizes"))
dump_struct_sizes();
else if (!strcmp(res->dump, "dump_ring"))
@@ -7207,7 +7416,6 @@ cmdline_parse_token_string_t cmd_dump_dump =
TOKEN_STRING_INITIALIZER(struct cmd_dump_result, dump,
"dump_physmem#"
"dump_memzone#"
- "dump_log_history#"
"dump_struct_sizes#"
"dump_ring#"
"dump_mempool#"
@@ -9299,6 +9507,10 @@ flowtype_to_str(uint16_t ftype)
{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
+ {"port", RTE_ETH_FLOW_PORT},
+ {"vxlan", RTE_ETH_FLOW_VXLAN},
+ {"geneve", RTE_ETH_FLOW_GENEVE},
+ {"nvgre", RTE_ETH_FLOW_NVGRE},
};
for (i = 0; i < RTE_DIM(ftype_table); i++) {
@@ -10399,6 +10611,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_showcfg,
(cmdline_parse_inst_t *)&cmd_start,
(cmdline_parse_inst_t *)&cmd_start_tx_first,
+ (cmdline_parse_inst_t *)&cmd_start_tx_first_n,
(cmdline_parse_inst_t *)&cmd_set_link_up,
(cmdline_parse_inst_t *)&cmd_set_link_down,
(cmdline_parse_inst_t *)&cmd_reset,
@@ -10408,6 +10621,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_set_fwd_list,
(cmdline_parse_inst_t *)&cmd_set_fwd_mask,
(cmdline_parse_inst_t *)&cmd_set_fwd_mode,
+ (cmdline_parse_inst_t *)&cmd_set_fwd_retry_mode,
(cmdline_parse_inst_t *)&cmd_set_burst_tx_retry,
(cmdline_parse_inst_t *)&cmd_set_promisc_mode_one,
(cmdline_parse_inst_t *)&cmd_set_promisc_mode_all,
@@ -10478,6 +10692,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_config_rx_mode_flag,
(cmdline_parse_inst_t *)&cmd_config_rss,
(cmdline_parse_inst_t *)&cmd_config_rxtx_queue,
+ (cmdline_parse_inst_t *)&cmd_config_txqflags,
(cmdline_parse_inst_t *)&cmd_config_rss_reta,
(cmdline_parse_inst_t *)&cmd_showport_reta,
(cmdline_parse_inst_t *)&cmd_config_burst,
@@ -10546,6 +10761,7 @@ prompt(void)
{
/* initialize non-constant commands */
cmd_set_fwd_mode_init();
+ cmd_set_fwd_retry_mode_init();
testpmd_cl = cmdline_stdin_new(main_ctx, "testpmd> ");
if (testpmd_cl == NULL)
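
Every command added above (txqflags, tx_first <num>, set fwd ... retry)
follows the same cmdline template: a result struct with one field per token,
one TOKEN_*_INITIALIZER per word, and a cmdline_parse_inst_t wired into
main_ctx. A minimal sketch with a hypothetical one-word command:

	struct cmd_demo_result {
		cmdline_fixed_string_t demo;
		uint32_t value;
	};

	static void
	cmd_demo_parsed(void *parsed_result,
			__attribute__((unused)) struct cmdline *cl,
			__attribute__((unused)) void *data)
	{
		struct cmd_demo_result *res = parsed_result;

		printf("demo value: %u\n", res->value);
	}

	cmdline_parse_token_string_t cmd_demo_demo =
		TOKEN_STRING_INITIALIZER(struct cmd_demo_result, demo, "demo");
	cmdline_parse_token_num_t cmd_demo_value =
		TOKEN_NUM_INITIALIZER(struct cmd_demo_result, value, UINT32);

	cmdline_parse_inst_t cmd_demo = {
		.f = cmd_demo_parsed,
		.data = NULL,
		.help_str = "demo <value>: print <value>",
		.tokens = {
			(void *)&cmd_demo_demo,
			(void *)&cmd_demo_value,
			NULL,
		},
	};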
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index b1bbec6d..c5865f95 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
+#include <rte_cycles.h>
#include "testpmd.h"
@@ -137,6 +138,11 @@ static const struct rss_type_info rss_type_table[] = {
{ "ipv6-ex", ETH_RSS_IPV6_EX },
{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
+ { "port", ETH_RSS_PORT },
+ { "vxlan", ETH_RSS_VXLAN },
+ { "geneve", ETH_RSS_GENEVE },
+ { "nvgre", ETH_RSS_NVGRE },
};
static void
@@ -150,6 +156,11 @@ print_ethaddr(const char *name, struct ether_addr *eth_addr)
void
nic_stats_display(portid_t port_id)
{
+ static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
+ static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
+ static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
+ uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
+ uint64_t mpps_rx, mpps_tx;
struct rte_eth_stats stats;
struct rte_port *port = &ports[port_id];
uint8_t i;
@@ -209,6 +220,23 @@ nic_stats_display(portid_t port_id)
}
}
+ diff_cycles = prev_cycles[port_id];
+ prev_cycles[port_id] = rte_rdtsc();
+ if (diff_cycles > 0)
+ diff_cycles = prev_cycles[port_id] - diff_cycles;
+
+ diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id];
+ diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id];
+ prev_pkts_rx[port_id] = stats.ipackets;
+ prev_pkts_tx[port_id] = stats.opackets;
+ mpps_rx = diff_cycles > 0 ?
+ diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
+ mpps_tx = diff_cycles > 0 ?
+ diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
+ printf("\n Throughput (since last show)\n");
+ printf(" Rx-pps: %12"PRIu64"\n Tx-pps: %12"PRIu64"\n",
+ mpps_rx, mpps_tx);
+
printf(" %s############################%s\n",
nic_stats_border, nic_stats_border);
}
@@ -232,29 +260,56 @@ nic_stats_clear(portid_t port_id)
void
nic_xstats_display(portid_t port_id)
{
- struct rte_eth_xstats *xstats;
- int len, ret, i;
+ struct rte_eth_xstat *xstats;
+ int cnt_xstats, idx_xstat;
+ struct rte_eth_xstat_name *xstats_names;
printf("###### NIC extended statistics for port %-2d\n", port_id);
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ printf("Error: Invalid port number %i\n", port_id);
+ return;
+ }
- len = rte_eth_xstats_get(port_id, NULL, 0);
- if (len < 0) {
- printf("Cannot get xstats count\n");
+ /* Get count */
+ cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
+ if (cnt_xstats < 0) {
+ printf("Error: Cannot get count of xstats\n");
return;
}
- xstats = malloc(sizeof(xstats[0]) * len);
+
+ /* Get id-name lookup table */
+ xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
+ if (xstats_names == NULL) {
+ printf("Cannot allocate memory for xstats lookup\n");
+ return;
+ }
+ if (cnt_xstats != rte_eth_xstats_get_names(
+ port_id, xstats_names, cnt_xstats)) {
+ printf("Error: Cannot get xstats lookup\n");
+ free(xstats_names);
+ return;
+ }
+
+ /* Get stats themselves */
+ xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
if (xstats == NULL) {
printf("Cannot allocate memory for xstats\n");
+ free(xstats_names);
return;
}
- ret = rte_eth_xstats_get(port_id, xstats, len);
- if (ret < 0 || ret > len) {
- printf("Cannot get xstats\n");
+ if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
+ printf("Error: Unable to get xstats\n");
+ free(xstats_names);
free(xstats);
return;
}
- for (i = 0; i < len; i++)
- printf("%s: %"PRIu64"\n", xstats[i].name, xstats[i].value);
+
+ /* Display xstats */
+ for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++)
+ printf("%s: %"PRIu64"\n",
+ xstats_names[idx_xstat].name,
+ xstats[idx_xstat].value);
+ free(xstats_names);
free(xstats);
}
@@ -893,8 +948,9 @@ fwd_lcores_config_display(void)
void
rxtx_config_display(void)
{
- printf(" %s packet forwarding - CRC stripping %s - "
+ printf(" %s packet forwarding%s - CRC stripping %s - "
"packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
+ retry_enabled == 0 ? "" : " with retry",
rx_mode.hw_strip_crc ? "enabled" : "disabled",
nb_pkt_per_burst);
@@ -1131,6 +1187,7 @@ simple_fwd_config_setup(void)
fwd_streams[i]->tx_port = fwd_ports_ids[j];
fwd_streams[i]->tx_queue = 0;
fwd_streams[i]->peer_addr = j;
+ fwd_streams[i]->retry_enabled = retry_enabled;
if (port_topology == PORT_TOPOLOGY_PAIRED) {
fwd_streams[j]->rx_port = fwd_ports_ids[j];
@@ -1138,6 +1195,7 @@ simple_fwd_config_setup(void)
fwd_streams[j]->tx_port = fwd_ports_ids[i];
fwd_streams[j]->tx_queue = 0;
fwd_streams[j]->peer_addr = i;
+ fwd_streams[j]->retry_enabled = retry_enabled;
}
}
}
@@ -1173,10 +1231,8 @@ rss_fwd_config_setup(void)
cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
cur_fwd_config.nb_fwd_streams =
(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
- if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
- cur_fwd_config.nb_fwd_streams =
- (streamid_t)cur_fwd_config.nb_fwd_lcores;
- else
+
+ if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
cur_fwd_config.nb_fwd_lcores =
(lcoreid_t)cur_fwd_config.nb_fwd_streams;
@@ -1185,7 +1241,7 @@ rss_fwd_config_setup(void)
setup_fwd_config_of_each_lcore(&cur_fwd_config);
rxp = 0; rxq = 0;
- for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
+ for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_streams; lc_id++) {
struct fwd_stream *fs;
fs = fwd_streams[lc_id];
@@ -1206,6 +1262,7 @@ rss_fwd_config_setup(void)
fs->tx_port = fwd_ports_ids[txp];
fs->tx_queue = rxq;
fs->peer_addr = fs->tx_port;
+ fs->retry_enabled = retry_enabled;
rxq = (queueid_t) (rxq + 1);
if (rxq < nb_q)
continue;
@@ -1280,6 +1337,7 @@ dcb_fwd_config_setup(void)
fs->tx_port = fwd_ports_ids[txp];
fs->tx_queue = txq + j % nb_tx_queue;
fs->peer_addr = fs->tx_port;
+ fs->retry_enabled = retry_enabled;
}
fwd_lcores[lc_id]->stream_nb +=
rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
@@ -1350,6 +1408,7 @@ icmp_echo_config_setup(void)
fs->tx_port = fs->rx_port;
fs->tx_queue = rxq;
fs->peer_addr = fs->tx_port;
+ fs->retry_enabled = retry_enabled;
if (verbose_level > 0)
printf(" stream=%d port=%d rxq=%d txq=%d\n",
sm_id, fs->rx_port, fs->rx_queue,
@@ -1381,21 +1440,22 @@ fwd_config_setup(void)
simple_fwd_config_setup();
}
-static void
+void
pkt_fwd_config_display(struct fwd_config *cfg)
{
struct fwd_stream *fs;
lcoreid_t lc_id;
streamid_t sm_id;
- printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
+ printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
"NUMA support %s, MP over anonymous pages %s\n",
cfg->fwd_eng->fwd_mode_name,
+ retry_enabled == 0 ? "" : " with retry",
cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
numa_support == 1 ? "enabled" : "disabled",
mp_anon != 0 ? "enabled" : "disabled");
- if (strcmp(cfg->fwd_eng->fwd_mode_name, "mac_retry") == 0)
+ if (retry_enabled)
printf("TX retry num: %u, delay between TX retries: %uus\n",
burst_tx_retry_num, burst_tx_delay_time);
for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
@@ -1420,14 +1480,6 @@ pkt_fwd_config_display(struct fwd_config *cfg)
printf("\n");
}
-
-void
-fwd_config_display(void)
-{
- fwd_config_setup();
- pkt_fwd_config_display(&cur_fwd_config);
-}
-
int
set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
{
@@ -1565,6 +1617,22 @@ set_fwd_ports_number(uint16_t nb_pt)
(unsigned int) nb_fwd_ports);
}
+int
+port_is_forwarding(portid_t port_id)
+{
+ unsigned int i;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return -1;
+
+ for (i = 0; i < nb_fwd_ports; i++) {
+ if (fwd_ports_ids[i] == port_id)
+ return 1;
+ }
+
+ return 0;
+}
+
void
set_nb_pkt_per_burst(uint16_t nb)
{
@@ -1673,8 +1741,35 @@ list_pkt_forwarding_modes(void)
if (strlen (fwd_modes) == 0) {
while ((fwd_eng = fwd_engines[i++]) != NULL) {
- strcat(fwd_modes, fwd_eng->fwd_mode_name);
- strcat(fwd_modes, separator);
+ strncat(fwd_modes, fwd_eng->fwd_mode_name,
+ sizeof(fwd_modes) - strlen(fwd_modes) - 1);
+ strncat(fwd_modes, separator,
+ sizeof(fwd_modes) - strlen(fwd_modes) - 1);
+ }
+ fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
+ }
+
+ return fwd_modes;
+}
+
+char*
+list_pkt_forwarding_retry_modes(void)
+{
+ static char fwd_modes[128] = "";
+ const char *separator = "|";
+ struct fwd_engine *fwd_eng;
+ unsigned i = 0;
+
+ if (strlen(fwd_modes) == 0) {
+ while ((fwd_eng = fwd_engines[i++]) != NULL) {
+ if (fwd_eng == &rx_only_engine)
+ continue;
+ strncat(fwd_modes, fwd_eng->fwd_mode_name,
+ sizeof(fwd_modes) -
+ strlen(fwd_modes) - 1);
+ strncat(fwd_modes, separator,
+ sizeof(fwd_modes) -
+ strlen(fwd_modes) - 1);
}
fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
}
@@ -1691,8 +1786,9 @@ set_pkt_forwarding_mode(const char *fwd_mode_name)
i = 0;
while ((fwd_eng = fwd_engines[i]) != NULL) {
if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
- printf("Set %s packet forwarding mode\n",
- fwd_mode_name);
+ printf("Set %s packet forwarding mode%s\n",
+ fwd_mode_name,
+ retry_enabled == 0 ? "" : " with retry");
cur_fwd_eng = fwd_eng;
return;
}
@@ -2028,6 +2124,10 @@ flowtype_to_str(uint16_t flow_type)
{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
+ {"port", RTE_ETH_FLOW_PORT},
+ {"vxlan", RTE_ETH_FLOW_VXLAN},
+ {"geneve", RTE_ETH_FLOW_GENEVE},
+ {"nvgre", RTE_ETH_FLOW_NVGRE},
};
for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
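
The throughput lines added to nic_stats_display() derive a packet rate from
the TSC: pps = delta_packets * tsc_hz / delta_cycles since the previous call.
A self-contained sketch of the calculation (as in the patch, the first call
measures against zeroed history and should be ignored):

	#include <rte_cycles.h>

	static uint64_t
	rate_pps(uint64_t pkts_now, uint64_t *prev_pkts, uint64_t *prev_cycles)
	{
		uint64_t cycles = rte_rdtsc();
		uint64_t diff_cycles = cycles - *prev_cycles;
		uint64_t diff_pkts = pkts_now - *prev_pkts;

		*prev_cycles = cycles;
		*prev_pkts = pkts_now;
		/* avoid dividing by zero when called back-to-back */
		return diff_cycles > 0 ?
			diff_pkts * rte_get_tsc_hz() / diff_cycles : 0;
	}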
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 7e4f6620..ac4bd8f4 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -643,6 +643,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint16_t i;
uint64_t ol_flags;
uint16_t testpmd_ol_flags;
+ uint32_t retry;
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
struct testpmd_offload_info info;
@@ -676,6 +677,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
info.tso_segsz = txp->tso_segsz;
for (i = 0; i < nb_rx; i++) {
+ if (likely(i < nb_rx - 1))
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+ void *));
ol_flags = 0;
info.is_tunnel = 0;
@@ -845,6 +849,17 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+ /*
+ * Retry if necessary
+ */
+ if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts_burst[nb_tx], nb_rx - nb_tx);
+ }
+ }
fs->tx_packets += nb_tx;
fs->rx_bad_ip_csum += rx_bad_ip_csum;
fs->rx_bad_l4_csum += rx_bad_l4_csum;
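
The retry loop above is repeated almost verbatim in every forwarding engine
this patch touches (csumonly, flowgen, icmpecho, iofwd, macfwd, macswap).
Factored into a helper it reads as below; a sketch only, relying on the
testpmd globals burst_tx_delay_time and burst_tx_retry_num that this patch
moves into testpmd.c:

	#include <rte_ethdev.h>
	#include <rte_cycles.h>

	static inline uint16_t
	tx_burst_with_retry(uint8_t port, uint16_t queue,
			struct rte_mbuf **pkts, uint16_t nb_pkts)
	{
		uint16_t nb_tx = rte_eth_tx_burst(port, queue, pkts, nb_pkts);
		uint32_t retry = 0;

		while (nb_tx < nb_pkts && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(port, queue,
					&pkts[nb_tx], nb_pkts - nb_tx);
		}
		/* caller still frees pkts[nb_tx..nb_pkts-1] on shortfall */
		return nb_tx;
	}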
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 0f307e85..a6abe91e 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -89,17 +89,6 @@ static struct ether_addr cfg_ether_dst =
#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
-static inline struct rte_mbuf *
-tx_mbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
-
static inline uint16_t
ip_sum(const unaligned_uint16_t *hdr, int hdr_len)
{
@@ -142,6 +131,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
uint16_t nb_tx;
uint16_t nb_pkt;
uint16_t i;
+ uint32_t retry;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
@@ -167,7 +157,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
ol_flags = ports[fs->tx_port].tx_ol_flags;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
- pkt = tx_mbuf_alloc(mbp);
+ pkt = rte_mbuf_raw_alloc(mbp);
if (!pkt)
break;
@@ -218,6 +208,17 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+ /*
+ * Retry if necessary
+ */
+	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
+		retry = 0;
+		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
+			rte_delay_us(burst_tx_delay_time);
+			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+				&pkts_burst[nb_tx], nb_pkt - nb_tx);
+ }
+ }
fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index e510f9bf..be308c9f 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -311,6 +311,7 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
struct ipv4_hdr *ip_h;
struct icmp_hdr *icmp_h;
struct ether_addr eth_addr;
+ uint32_t retry;
uint32_t ip_addr;
uint16_t nb_rx;
uint16_t nb_tx;
@@ -346,6 +347,9 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
fs->rx_packets += nb_rx;
nb_replies = 0;
for (i = 0; i < nb_rx; i++) {
+ if (likely(i < nb_rx - 1))
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+ void *));
pkt = pkts_burst[i];
eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
eth_type = RTE_BE_TO_CPU_16(eth_h->ether_type);
@@ -515,6 +519,20 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
if (nb_replies > 0) {
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
nb_replies);
+ /*
+ * Retry if necessary
+ */
+ if (unlikely(nb_tx < nb_replies) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_replies &&
+ retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port,
+ fs->tx_queue,
+ &pkts_burst[nb_tx],
+ nb_replies - nb_tx);
+ }
+ }
fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 8840d868..7b6033a5 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -80,6 +80,8 @@ pkt_burst_io_forward(struct fwd_stream *fs)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
uint16_t nb_rx;
uint16_t nb_tx;
+ uint32_t retry;
+
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
@@ -93,16 +95,28 @@ pkt_burst_io_forward(struct fwd_stream *fs)
/*
* Receive a burst of packets and forward them.
*/
- nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
- nb_pkt_per_burst);
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
+ pkts_burst, nb_pkt_per_burst);
if (unlikely(nb_rx == 0))
return;
+ fs->rx_packets += nb_rx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
- fs->rx_packets += nb_rx;
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ pkts_burst, nb_rx);
+ /*
+ * Retry if necessary
+ */
+ if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts_burst[nb_tx], nb_rx - nb_tx);
+ }
+ }
fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
diff --git a/app/test-pmd/macfwd-retry.c b/app/test-pmd/macfwd-retry.c
deleted file mode 100644
index 3a96b3df..00000000
--- a/app/test-pmd/macfwd-retry.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdarg.h>
-#include <string.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <inttypes.h>
-
-#include <sys/queue.h>
-#include <sys/stat.h>
-
-#include <rte_common.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_cycles.h>
-#include <rte_memory.h>
-#include <rte_memcpy.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_memory.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_ip.h>
-#include <rte_string_fns.h>
-
-#include "testpmd.h"
-
-#define BURST_TX_WAIT_US 10
-#define BURST_TX_RETRIES 5
-
-/*
- * Global variables that control number of retires and
- * timeout (in us) between retires.
- */
-uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
-uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
-
-/*
- * Forwarding of packets in MAC mode with a wait and retry on TX to reduce packet loss.
- * Change the source and the destination Ethernet addressed of packets
- * before forwarding them.
- */
-static void
-pkt_burst_mac_retry_forward(struct fwd_stream *fs)
-{
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_mbuf *mb;
- struct ether_hdr *eth_hdr;
- uint32_t retry;
- uint16_t nb_rx;
- uint16_t nb_tx;
- uint16_t i;
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- uint64_t start_tsc;
- uint64_t end_tsc;
- uint64_t core_cycles;
-#endif
-
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- start_tsc = rte_rdtsc();
-#endif
-
- /*
- * Receive a burst of packets and forward them.
- */
- nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
- nb_pkt_per_burst);
- if (unlikely(nb_rx == 0))
- return;
-
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
-#endif
- fs->rx_packets += nb_rx;
- for (i = 0; i < nb_rx; i++) {
- mb = pkts_burst[i];
- eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
- ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
- &eth_hdr->d_addr);
- ether_addr_copy(&ports[fs->tx_port].eth_addr,
- &eth_hdr->s_addr);
- }
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
-
- /*
- * If not all packets have been TX'd then wait and retry.
- */
- if (unlikely(nb_tx < nb_rx)) {
- for (retry = 0; retry < burst_tx_retry_num; retry++) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts_burst[nb_tx], nb_rx - nb_tx);
- if (nb_tx == nb_rx)
- break;
- }
- }
-
- fs->tx_packets += nb_tx;
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
-#endif
- if (unlikely(nb_tx < nb_rx)) {
- fs->fwd_dropped += (nb_rx - nb_tx);
- do {
- rte_pktmbuf_free(pkts_burst[nb_tx]);
- } while (++nb_tx < nb_rx);
- }
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- end_tsc = rte_rdtsc();
- core_cycles = (end_tsc - start_tsc);
- fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
-#endif
-}
-
-struct fwd_engine mac_retry_fwd_engine = {
- .fwd_mode_name = "mac_retry",
- .port_fwd_begin = NULL,
- .port_fwd_end = NULL,
- .packet_fwd = pkt_burst_mac_retry_forward,
-};
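
The dedicated mac_retry engine deleted above is superseded by the per-stream
retry flag: any forwarding mode can now be combined with retry. The old
behaviour is reproduced at the testpmd prompt with the commands this patch
adds (the second line is the confirmation printed by set_pkt_forwarding_mode):

	testpmd> set fwd mac retry
	Set mac packet forwarding mode with retry
	testpmd> set burst tx delay 10 retry 5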
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 3b7fffb7..5d1c1617 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -81,6 +81,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
struct rte_port *txp;
struct rte_mbuf *mb;
struct ether_hdr *eth_hdr;
+ uint32_t retry;
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t i;
@@ -113,6 +114,9 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
ol_flags |= PKT_TX_QINQ_PKT;
for (i = 0; i < nb_rx; i++) {
+ if (likely(i < nb_rx - 1))
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+ void *));
mb = pkts_burst[i];
eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
@@ -126,6 +130,18 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
mb->vlan_tci_outer = txp->tx_vlan_id_outer;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+ /*
+ * Retry if necessary
+ */
+ if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts_burst[nb_tx], nb_rx - nb_tx);
+ }
+ }
+
fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 154889d1..4b0dbeb5 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -84,6 +84,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
uint16_t nb_rx;
uint16_t nb_tx;
uint16_t i;
+ uint32_t retry;
uint64_t ol_flags = 0;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
@@ -113,6 +114,9 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
ol_flags |= PKT_TX_QINQ_PKT;
for (i = 0; i < nb_rx; i++) {
+ if (likely(i < nb_rx - 1))
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+ void *));
mb = pkts_burst[i];
eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
@@ -128,6 +132,17 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
mb->vlan_tci_outer = txp->tx_vlan_id_outer;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+ /*
+ * Retry if necessary
+ */
+ if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts_burst[nb_tx], nb_rx - nb_tx);
+ }
+ }
fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
diff --git a/app/test-pmd/mempool_anon.c b/app/test-pmd/mempool_anon.c
deleted file mode 100644
index 47304329..00000000
--- a/app/test-pmd/mempool_anon.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include "mempool_osdep.h"
-#include <rte_errno.h>
-
-#ifdef RTE_EXEC_ENV_LINUXAPP
-
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/mman.h>
-
-
-#define PAGEMAP_FNAME "/proc/self/pagemap"
-
-/*
- * the pfn (page frame number) are bits 0-54 (see pagemap.txt in linux
- * Documentation).
- */
-#define PAGEMAP_PFN_BITS 54
-#define PAGEMAP_PFN_MASK RTE_LEN2MASK(PAGEMAP_PFN_BITS, phys_addr_t)
-
-
-static int
-get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num, uint32_t pg_sz)
-{
- int32_t fd, rc;
- uint32_t i, nb;
- off_t ofs;
-
- ofs = (uintptr_t)va / pg_sz * sizeof(*pa);
- nb = pg_num * sizeof(*pa);
-
- if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0)
- return ENOENT;
-
- if ((rc = pread(fd, pa, nb, ofs)) < 0 || (rc -= nb) != 0) {
-
- RTE_LOG(ERR, USER1, "failed read of %u bytes from \'%s\' "
- "at offset %zu, error code: %d\n",
- nb, PAGEMAP_FNAME, (size_t)ofs, errno);
- rc = ENOENT;
- }
-
- close(fd);
-
- for (i = 0; i != pg_num; i++)
- pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz;
-
- return rc;
-}
-
-struct rte_mempool *
-mempool_anon_create(const char *name, unsigned elt_num, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags)
-{
- struct rte_mempool *mp;
- phys_addr_t *pa;
- char *va, *uv;
- uint32_t n, pg_num, pg_shift, pg_sz, total_size;
- size_t sz;
- ssize_t usz;
- int32_t rc;
-
- rc = ENOMEM;
- mp = NULL;
-
- pg_sz = getpagesize();
- if (rte_is_power_of_2(pg_sz) == 0) {
- rte_errno = EINVAL;
- return mp;
- }
-
- pg_shift = rte_bsf32(pg_sz);
-
- total_size = rte_mempool_calc_obj_size(elt_size, flags, NULL);
-
- /* calc max memory size and max number of pages needed. */
- sz = rte_mempool_xmem_size(elt_num, total_size, pg_shift);
- pg_num = sz >> pg_shift;
-
- /* get chunk of virtually continuos memory.*/
- if ((va = mmap(NULL, sz, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS | MAP_LOCKED,
- -1, 0)) == MAP_FAILED) {
- RTE_LOG(ERR, USER1, "%s(%s) failed mmap of %zu bytes, "
- "error code: %d\n",
- __func__, name, sz, errno);
- rte_errno = rc;
- return mp;
- }
-
- /* extract physical mappings of the allocated memory. */
- if ((pa = calloc(pg_num, sizeof (*pa))) != NULL &&
- (rc = get_phys_map(va, pa, pg_num, pg_sz)) == 0) {
-
- /*
- * Check that allocated size is big enough to hold elt_num
- * objects and a calcualte how many bytes are actually required.
- */
-
- if ((usz = rte_mempool_xmem_usage(va, elt_num, total_size, pa,
- pg_num, pg_shift)) < 0) {
-
- n = -usz;
- rc = ENOENT;
- RTE_LOG(ERR, USER1, "%s(%s) only %u objects from %u "
- "requested can be created over "
- "mmaped region %p of %zu bytes\n",
- __func__, name, n, elt_num, va, sz);
- } else {
-
- /* unmap unused pages if any */
- if ((size_t)usz < sz) {
-
- uv = va + usz;
- usz = sz - usz;
-
- RTE_LOG(INFO, USER1,
- "%s(%s): unmap unused %zu of %zu "
- "mmaped bytes @%p\n",
- __func__, name, (size_t)usz, sz, uv);
- munmap(uv, usz);
- sz -= usz;
- pg_num = sz >> pg_shift;
- }
-
- if ((mp = rte_mempool_xmem_create(name, elt_num,
- elt_size, cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags, va, pa, pg_num,
- pg_shift)) != NULL)
-
- RTE_VERIFY(elt_num == mp->size);
- }
- }
-
- if (mp == NULL) {
- munmap(va, sz);
- rte_errno = rc;
- }
-
- free(pa);
- return mp;
-}
-
-#else /* RTE_EXEC_ENV_LINUXAPP */
-
-
-struct rte_mempool *
-mempool_anon_create(__rte_unused const char *name,
- __rte_unused unsigned elt_num, __rte_unused unsigned elt_size,
- __rte_unused unsigned cache_size,
- __rte_unused unsigned private_data_size,
- __rte_unused rte_mempool_ctor_t *mp_init,
- __rte_unused void *mp_init_arg,
- __rte_unused rte_mempool_obj_ctor_t *obj_init,
- __rte_unused void *obj_init_arg,
- __rte_unused int socket_id, __rte_unused unsigned flags)
-{
- rte_errno = ENOTSUP;
- return NULL;
-}
-
-#endif /* RTE_EXEC_ENV_LINUXAPP */
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 55572ebe..8792c2c6 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -526,6 +526,7 @@ launch_args_parse(int argc, char** argv)
{ "pkt-filter-drop-queue", 1, 0, 0 },
{ "crc-strip", 0, 0, 0 },
{ "enable-rx-cksum", 0, 0, 0 },
+ { "enable-scatter", 0, 0, 0 },
{ "disable-hw-vlan", 0, 0, 0 },
{ "disable-hw-vlan-filter", 0, 0, 0 },
{ "disable-hw-vlan-strip", 0, 0, 0 },
@@ -764,6 +765,8 @@ launch_args_parse(int argc, char** argv)
}
if (!strcmp(lgopts[opt_idx].name, "crc-strip"))
rx_mode.hw_strip_crc = 1;
+ if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
+ rx_mode.enable_scatter = 1;
if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
rx_mode.hw_ip_checksum = 1;
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index 14555abc..fbf287dc 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -156,9 +156,9 @@ pkt_burst_receive(struct fwd_stream *fs)
printf("hash=0x%x ID=0x%x ",
mb->hash.fdir.hash, mb->hash.fdir.id);
}
- if (ol_flags & PKT_RX_VLAN_PKT)
+ if (ol_flags & PKT_RX_VLAN_STRIPPED)
printf(" - VLAN tci=0x%x", mb->vlan_tci);
- if (ol_flags & PKT_RX_QINQ_PKT)
+ if (ol_flags & PKT_RX_QINQ_STRIPPED)
printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
mb->vlan_tci, mb->vlan_tci_outer);
if (mb->packet_type) {
@@ -179,6 +179,9 @@ pkt_burst_receive(struct fwd_stream *fs)
case RTE_PTYPE_L2_ETHER_LLDP:
printf(" - (outer) L2 type: ETHER_LLDP");
break;
+ case RTE_PTYPE_L2_ETHER_NSH:
+ printf(" - (outer) L2 type: ETHER_NSH");
+ break;
default:
printf(" - (outer) L2 type: Unknown");
break;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 26a174c1..06885ceb 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -49,6 +49,7 @@
#include <inttypes.h>
#include <rte_common.h>
+#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
@@ -75,9 +76,11 @@
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
+#ifdef RTE_LIBRTE_PDUMP
+#include <rte_pdump.h>
+#endif
#include "testpmd.h"
-#include "mempool_osdep.h"
uint16_t verbose_level = 0; /**< Silent by default. */
@@ -144,7 +147,6 @@ streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
struct fwd_engine * fwd_engines[] = {
&io_fwd_engine,
&mac_fwd_engine,
- &mac_retry_fwd_engine,
&mac_swap_engine,
&flow_gen_engine,
&rx_only_engine,
@@ -159,6 +161,9 @@ struct fwd_engine * fwd_engines[] = {
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
+uint32_t retry_enabled;
+uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
+uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
@@ -416,6 +421,10 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
+ RTE_LOG(INFO, USER1,
+ "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
+ pool_name, nb_mbuf, mbuf_seg_size, socket_id);
+
#ifdef RTE_LIBRTE_PMD_XENVIRT
rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
(unsigned) mb_mempool_cache,
@@ -427,22 +436,29 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
/* if the former XEN allocation failed fall back to normal allocation */
if (rte_mp == NULL) {
- if (mp_anon != 0)
- rte_mp = mempool_anon_create(pool_name, nb_mbuf,
- mb_size, (unsigned) mb_mempool_cache,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- socket_id, 0);
- else
+ if (mp_anon != 0) {
+ rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
+ mb_size, (unsigned) mb_mempool_cache,
+ sizeof(struct rte_pktmbuf_pool_private),
+ socket_id, 0);
+
+			if (rte_mempool_populate_anon(rte_mp) == 0) {
+				rte_mempool_free(rte_mp);
+				rte_mp = NULL;
+			} else {
+				rte_pktmbuf_pool_init(rte_mp, NULL);
+				rte_mempool_obj_iter(rte_mp,
+						rte_pktmbuf_init, NULL);
+			}
+ } else {
/* wrapper to rte_mempool_create() */
rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
mb_mempool_cache, 0, mbuf_seg_size, socket_id);
+ }
}
if (rte_mp == NULL) {
- rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
- "failed\n", socket_id);
+ rte_exit(EXIT_FAILURE,
+ "Creation of mbuf pool for socket %u failed: %s\n",
+ socket_id, rte_strerror(rte_errno));
} else if (verbose_level > 0) {
rte_mempool_dump(stdout, rte_mp);
}
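
This hunk replaces the deleted mempool_anon.c with the generic librte_mempool
API: create an empty pool, populate it from anonymous memory, then run the
pktmbuf initializers. As a standalone sketch with the failure path made
explicit (function name and parameters are illustrative):

	#include <rte_mempool.h>
	#include <rte_mbuf.h>

	static struct rte_mempool *
	anon_pktmbuf_pool(const char *name, unsigned nb_mbuf, unsigned mb_size,
			unsigned cache_size, int socket_id)
	{
		struct rte_mempool *mp;

		mp = rte_mempool_create_empty(name, nb_mbuf, mb_size,
				cache_size,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
		if (mp == NULL)
			return NULL;
		/* rte_mempool_populate_anon() returns 0 when no objects
		 * could be populated */
		if (rte_mempool_populate_anon(mp) == 0) {
			rte_mempool_free(mp);
			return NULL;
		}
		rte_pktmbuf_pool_init(mp, NULL);
		rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
		return mp;
	}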
@@ -580,6 +596,8 @@ init_config(void)
/* Configuration of packet forwarding streams. */
if (init_fwd_streams() < 0)
rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
+
+ fwd_config_setup();
}
@@ -981,6 +999,12 @@ start_packet_forwarding(int with_tx_first)
printf("Packet forwarding already started\n");
return;
}
+
+ if (init_fwd_streams() < 0) {
+ printf("Fail from init_fwd_streams()\n");
+ return;
+ }
+
if(dcb_test) {
for (i = 0; i < nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
@@ -1003,6 +1027,7 @@ start_packet_forwarding(int with_tx_first)
flush_fwd_rx_queues();
fwd_config_setup();
+ pkt_fwd_config_display(&cur_fwd_config);
rxtx_config_display();
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
@@ -1036,8 +1061,11 @@ start_packet_forwarding(int with_tx_first)
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
(*port_fwd_begin)(fwd_ports_ids[i]);
}
- launch_packet_forwarding(run_one_txonly_burst_on_core);
- rte_eal_mp_wait_lcore();
+ while (with_tx_first--) {
+ launch_packet_forwarding(
+ run_one_txonly_burst_on_core);
+ rte_eal_mp_wait_lcore();
+ }
port_fwd_end = tx_only_engine.port_fwd_end;
if (port_fwd_end != NULL) {
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
@@ -1070,10 +1098,6 @@ stop_packet_forwarding(void)
#endif
static const char *acc_stats_border = "+++++++++++++++";
- if (all_ports_started() == 0) {
- printf("Not all ports were started\n");
- return;
- }
if (test_done) {
printf("Packet forwarding not started\n");
return;
@@ -1268,19 +1292,9 @@ start_port(portid_t pid)
struct rte_port *port;
struct ether_addr mac_addr;
- if (test_done == 0) {
- printf("Please stop forwarding first\n");
- return -1;
- }
-
if (port_id_is_invalid(pid, ENABLED_WARN))
return 0;
- if (init_fwd_streams() < 0) {
- printf("Fail from init_fwd_streams()\n");
- return -1;
- }
-
if(dcb_config)
dcb_test = 1;
FOREACH_PORT(pi, ports) {
@@ -1351,7 +1365,7 @@ start_port(portid_t pid)
if (mp == NULL) {
printf("Failed to setup RX queue:"
"No mempool allocation"
- "on the socket %d\n",
+ " on the socket %d\n",
rxring_numa[pi]);
return -1;
}
@@ -1359,17 +1373,23 @@ start_port(portid_t pid)
diag = rte_eth_rx_queue_setup(pi, qi,
nb_rxd,rxring_numa[pi],
&(port->rx_conf),mp);
- }
- else
+ } else {
+ struct rte_mempool *mp =
+ mbuf_pool_find(port->socket_id);
+ if (mp == NULL) {
+ printf("Failed to setup RX queue:"
+ "No mempool allocation"
+ " on the socket %d\n",
+ port->socket_id);
+ return -1;
+ }
diag = rte_eth_rx_queue_setup(pi, qi,
nb_rxd,port->socket_id,
- &(port->rx_conf),
- mbuf_pool_find(port->socket_id));
-
+ &(port->rx_conf), mp);
+ }
if (diag == 0)
continue;
-
/* Fail to setup rx queue, return */
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING,
@@ -1424,10 +1444,6 @@ stop_port(portid_t pid)
struct rte_port *port;
int need_check_link_status = 0;
- if (test_done == 0) {
- printf("Please stop forwarding first\n");
- return;
- }
if (dcb_test) {
dcb_test = 0;
dcb_config = 0;
@@ -1442,6 +1458,16 @@ stop_port(portid_t pid)
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
+ if (port_is_forwarding(pi) != 0 && test_done == 0) {
+ printf("Please remove port %d from forwarding configuration.\n", pi);
+ continue;
+ }
+
+ if (port_is_bonding_slave(pi)) {
+ printf("Please remove port %d from bonded device.\n", pi);
+ continue;
+ }
+
port = &ports[pi];
if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
RTE_PORT_HANDLING) == 0)
@@ -1466,11 +1492,6 @@ close_port(portid_t pid)
portid_t pi;
struct rte_port *port;
- if (test_done == 0) {
- printf("Please stop forwarding first\n");
- return;
- }
-
if (port_id_is_invalid(pid, ENABLED_WARN))
return;
@@ -1480,6 +1501,16 @@ close_port(portid_t pid)
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
+ if (port_is_forwarding(pi) != 0 && test_done == 0) {
+ printf("Please remove port %d from forwarding configuration.\n", pi);
+ continue;
+ }
+
+ if (port_is_bonding_slave(pi)) {
+ printf("Please remove port %d from bonded device.\n", pi);
+ continue;
+ }
+
port = &ports[pi];
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
@@ -1497,7 +1528,7 @@ close_port(portid_t pid)
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
- printf("Port %d can not be set into stopped\n", pi);
+ printf("Port %d cannot be set to closed\n", pi);
}
printf("Done\n");
@@ -1506,7 +1537,8 @@ close_port(portid_t pid)
void
attach_port(char *identifier)
{
- portid_t i, j, pi = 0;
+ portid_t pi = 0;
+ unsigned int socket_id;
printf("Attaching a new port...\n");
@@ -1515,30 +1547,19 @@ attach_port(char *identifier)
return;
}
- if (test_done == 0) {
- printf("Please stop forwarding first\n");
- return;
- }
-
if (rte_eth_dev_attach(identifier, &pi))
return;
ports[pi].enabled = 1;
- reconfig(pi, rte_eth_dev_socket_id(pi));
+ socket_id = (unsigned)rte_eth_dev_socket_id(pi);
+ /* if socket_id is invalid, set to 0 */
+ if (check_socket_id(socket_id) < 0)
+ socket_id = 0;
+ reconfig(pi, socket_id);
rte_eth_promiscuous_enable(pi);
nb_ports = rte_eth_dev_count();
- /* set_default_fwd_ports_config(); */
- memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
- i = 0;
- FOREACH_PORT(j, ports) {
- fwd_ports_ids[i] = j;
- i++;
- }
- nb_cfg_ports = nb_ports;
- nb_fwd_ports++;
-
ports[pi].port_status = RTE_PORT_STOPPED;
printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
@@ -1548,7 +1569,6 @@ attach_port(char *identifier)
void
detach_port(uint8_t port_id)
{
- portid_t i, pi = 0;
char name[RTE_ETH_NAME_MAX_LEN];
printf("Detaching a port...\n");
@@ -1564,16 +1584,6 @@ detach_port(uint8_t port_id)
ports[port_id].enabled = 0;
nb_ports = rte_eth_dev_count();
- /* set_default_fwd_ports_config(); */
- memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
- i = 0;
- FOREACH_PORT(pi, ports) {
- fwd_ports_ids[i] = pi;
- i++;
- }
- nb_cfg_ports = nb_ports;
- nb_fwd_ports--;
-
printf("Port '%s' is detached. Now total ports is %d\n",
name, nb_ports);
printf("Done\n");
@@ -1843,6 +1853,14 @@ void clear_port_slave_flag(portid_t slave_pid)
port->slave_flag = 0;
}
+uint8_t port_is_bonding_slave(portid_t slave_pid)
+{
+ struct rte_port *port;
+
+ port = &ports[slave_pid];
+ return port->slave_flag;
+}
+
const uint16_t vlan_tags[] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
@@ -2018,6 +2036,10 @@ signal_handler(int signum)
if (signum == SIGINT || signum == SIGTERM) {
printf("\nSignal %d received, preparing to exit...\n",
signum);
+#ifdef RTE_LIBRTE_PDUMP
+ /* uninitialize packet capture framework */
+ rte_pdump_uninit();
+#endif
force_quit();
/* exit with the expected status */
signal(signum, SIG_DFL);
@@ -2038,6 +2060,11 @@ main(int argc, char** argv)
if (diag < 0)
rte_panic("Cannot init EAL\n");
+#ifdef RTE_LIBRTE_PDUMP
+ /* initialize packet capture framework */
+ rte_pdump_init(NULL);
+#endif
+
nb_ports = (portid_t) rte_eth_dev_count();
if (nb_ports == 0)
RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 0f72ca1f..2b281cc4 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -103,6 +103,8 @@ struct fwd_stream {
queueid_t tx_queue; /**< TX queue to send forwarded packets */
streamid_t peer_addr; /**< index of peer ethernet address of packets */
+ unsigned int retry_enabled;
+
/* "read-write" results */
unsigned int rx_packets; /**< received packets */
unsigned int tx_packets; /**< received packets transmitted */
@@ -220,9 +222,14 @@ struct fwd_engine {
packet_fwd_t packet_fwd; /**< Mandatory. */
};
+#define BURST_TX_WAIT_US 1
+#define BURST_TX_RETRIES 64
+
+extern uint32_t burst_tx_delay_time;
+extern uint32_t burst_tx_retry_num;
+
extern struct fwd_engine io_fwd_engine;
extern struct fwd_engine mac_fwd_engine;
-extern struct fwd_engine mac_retry_fwd_engine;
extern struct fwd_engine mac_swap_engine;
extern struct fwd_engine flow_gen_engine;
extern struct fwd_engine rx_only_engine;
@@ -380,6 +387,7 @@ extern int8_t tx_wthresh;
extern struct fwd_config cur_fwd_config;
extern struct fwd_engine *cur_fwd_eng;
+extern uint32_t retry_enabled;
extern struct fwd_lcore **fwd_lcores;
extern struct fwd_stream **fwd_streams;
@@ -472,7 +480,7 @@ void port_infos_display(portid_t port_id);
void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
void fwd_lcores_config_display(void);
-void fwd_config_display(void);
+void pkt_fwd_config_display(struct fwd_config *cfg);
void rxtx_config_display(void);
void fwd_config_setup(void);
void set_def_fwd_config(void);
@@ -500,6 +508,7 @@ void set_fwd_lcores_number(uint16_t nb_lc);
void set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt);
void set_fwd_ports_mask(uint64_t portmask);
void set_fwd_ports_number(uint16_t nb_pt);
+int port_is_forwarding(portid_t port_id);
void rx_vlan_strip_set(portid_t port_id, int on);
void rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on);
@@ -523,6 +532,7 @@ void show_tx_pkt_segments(void);
void set_tx_pkt_split(const char *name);
void set_nb_pkt_per_burst(uint16_t pkt_burst);
char *list_pkt_forwarding_modes(void);
+char *list_pkt_forwarding_retry_modes(void);
void set_pkt_forwarding_mode(const char *fwd_mode);
void start_packet_forwarding(int with_tx_first);
void stop_packet_forwarding(void);
@@ -531,6 +541,8 @@ void dev_set_link_down(portid_t pid);
void init_port_config(void);
void set_port_slave_flag(portid_t slave_pid);
void clear_port_slave_flag(portid_t slave_pid);
+uint8_t port_is_bonding_slave(portid_t slave_pid);
+
int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode,
enum rte_eth_nb_tcs num_tcs,
uint8_t pfc_en);
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index b37cae57..11fd681d 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -86,16 +86,6 @@
static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */
static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
-static inline struct rte_mbuf *
-tx_mbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
static void
copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
unsigned offset)
@@ -203,6 +193,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
uint16_t nb_tx;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
+ uint32_t retry;
uint64_t ol_flags = 0;
uint8_t i;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
@@ -225,7 +216,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
ol_flags |= PKT_TX_QINQ_PKT;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
- pkt = tx_mbuf_alloc(mbp);
+ pkt = rte_mbuf_raw_alloc(mbp);
if (pkt == NULL) {
nomore_mbuf:
if (nb_pkt == 0)
@@ -240,7 +231,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
nb_segs = tx_pkt_nb_segs;
pkt_len = pkt->data_len;
for (i = 1; i < nb_segs; i++) {
- pkt_seg->next = tx_mbuf_alloc(mbp);
+ pkt_seg->next = rte_mbuf_raw_alloc(mbp);
if (pkt_seg->next == NULL) {
pkt->nb_segs = i;
rte_pktmbuf_free(pkt);
@@ -283,6 +274,17 @@ pkt_burst_transmit(struct fwd_stream *fs)
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+ /*
+ * Retry if necessary
+ */
+ if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &pkts_burst[nb_tx], nb_pkt - nb_tx);
+ }
+ }
fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
diff --git a/app/test/Makefile b/app/test/Makefile
index a4907d59..2de8c7a6 100644
--- a/app/test/Makefile
+++ b/app/test/Makefile
@@ -33,6 +33,37 @@ include $(RTE_SDK)/mk/rte.vars.mk
ifeq ($(CONFIG_RTE_APP_TEST),y)
+# Define an externally linked resource. A linked resource is an arbitrary
+# file that is linked into the test binary. The application refers to this
+# resource by name. The link step generates identifiers beg_<name> and end_<name>
+# for referencing by the C code.
+#
+# Parameters: <unique name>, <file to be linked>
+define linked_resource
+SRCS-y += $(1).res.o
+$(1).res.o: $(2)
+ @ echo ' MKRES $$@'
+ $Q [ "$$(<D)" = . ] || ln -fs $$<
+ $Q $(OBJCOPY) -I binary -B $(RTE_OBJCOPY_ARCH) -O $(RTE_OBJCOPY_TARGET) \
+ --rename-section \
+ .data=.rodata,alloc,load,data,contents,readonly \
+ --redefine-sym _binary_$$(subst .,_,$$(<F))_start=beg_$(1) \
+ --redefine-sym _binary_$$(subst .,_,$$(<F))_end=end_$(1) \
+ --redefine-sym _binary_$$(subst .,_,$$(<F))_size=siz_$(1) \
+ $$(<F) $$@
+endef
+
+ifeq ($(CONFIG_RTE_APP_TEST_RESOURCE_TAR),y)
+define linked_tar_resource
+$(1).tar: $(2)
+ @ echo ' TAR $$@'
+ $Q tar -C $$(dir $$<) -cf $$@ $$(notdir $$<)
+$(call linked_resource,$(1),$(1).tar)
+endef
+else # ! CONFIG_RTE_APP_TEST_RESOURCE_TAR
+linked_tar_resource =
+endif # CONFIG_RTE_APP_TEST_RESOURCE_TAR
+
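
For illustration, the symbols emitted by the objcopy rule above can be consumed
from C roughly as follows (a minimal sketch; the resource name my_blob is
hypothetical and would be registered with $(call linked_resource,my_blob,<file>)):

    #include <stddef.h>
    #include <stdio.h>

    /* objcopy emits these delimiters for the hypothetical resource
     * "my_blob"; they bracket the file's bytes inside the test binary. */
    extern const char beg_my_blob;
    extern const char end_my_blob;

    static void
    print_blob_size(void)
    {
    	size_t len = &end_my_blob - &beg_my_blob;

    	printf("my_blob: %zu bytes\n", len);
    }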
#
# library name
#
@@ -43,7 +74,14 @@ APP = test
#
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) := commands.c
SRCS-y += test.c
-SRCS-y += test_pci.c
+SRCS-y += resource.c
+SRCS-y += test_resource.c
+test_resource.res: test_resource.c
+ @ cp $< $@
+$(eval $(call linked_resource,test_resource_c,test_resource.res))
+$(eval $(call linked_tar_resource,test_resource_tar,test_resource.c))
+SRCS-$(CONFIG_RTE_APP_TEST_RESOURCE_TAR) += test_pci.c
+$(eval $(call linked_tar_resource,test_pci_sysfs,test_pci_sysfs))
SRCS-y += test_prefetch.c
SRCS-y += test_byteorder.c
SRCS-y += test_per_lcore.c
@@ -87,9 +125,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_thash.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_functions.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_scaling.c
+SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_multiwriter.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6_perf.c
SRCS-y += test_debug.c
SRCS-y += test_errno.c
@@ -151,6 +192,7 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring_perf.c
+SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_aes.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c
@@ -164,7 +206,7 @@ CFLAGS += -D_GNU_SOURCE
LDLIBS += -lm
# Disable VTA for memcpy test
-ifeq ($(CC), gcc)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS_test_memcpy.o += -fno-var-tracking-assignments
CFLAGS_test_memcpy_perf.o += -fno-var-tracking-assignments
@@ -185,6 +227,10 @@ endif
endif
endif
+ifeq ($(CONFIG_RTE_APP_TEST_RESOURCE_TAR),y)
+LDLIBS += -larchive
+endif
+
include $(RTE_SDK)/mk/rte.app.mk
endif
diff --git a/app/test/autotest_data.py b/app/test/autotest_data.py
index dde45111..1e6b422c 100644
--- a/app/test/autotest_data.py
+++ b/app/test/autotest_data.py
@@ -58,6 +58,12 @@ parallel_test_group_list = [
"Tests" :
[
{
+ "Name" : "Cycles autotest",
+ "Command" : "cycles_autotest",
+ "Func" : default_autotest,
+ "Report" : None,
+ },
+ {
"Name" : "Timer autotest",
"Command" : "timer_autotest",
"Func" : timer_autotest,
@@ -88,9 +94,9 @@ parallel_test_group_list = [
"Report" : None,
},
{
- "Name" : "Dump log history",
- "Command" : "dump_log_history",
- "Func" : dump_autotest,
+ "Name" : "Resource autotest",
+ "Command" : "resource_autotest",
+ "Func" : default_autotest,
"Report" : None,
},
{
@@ -109,7 +115,7 @@ parallel_test_group_list = [
},
{
"Prefix": "group_2",
- "Memory" : "128",
+ "Memory" : "16",
"Tests" :
[
{
@@ -164,7 +170,7 @@ parallel_test_group_list = [
},
{
"Prefix": "group_3",
- "Memory" : per_sockets(1024),
+ "Memory" : per_sockets(390),
"Tests" :
[
{
@@ -174,6 +180,12 @@ parallel_test_group_list = [
"Report" : None,
},
{
+ "Name" : "LPM6 autotest",
+ "Command" : "lpm6_autotest",
+ "Func" : default_autotest,
+ "Report" : None,
+ },
+ {
"Name" : "IVSHMEM autotest",
"Command" : "ivshmem_autotest",
"Func" : default_autotest,
@@ -287,7 +299,7 @@ parallel_test_group_list = [
},
{
"Prefix": "group_6",
- "Memory" : per_sockets(620),
+ "Memory" : per_sockets(128),
"Tests" :
[
{
@@ -324,7 +336,7 @@ parallel_test_group_list = [
},
{
"Prefix" : "group_7",
- "Memory" : "400",
+ "Memory" : "64",
"Tests" :
[
{
@@ -371,12 +383,6 @@ non_parallel_test_group_list = [
"Tests" :
[
{
- "Name" : "Cycles autotest",
- "Command" : "cycles_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
"Name" : "Mempool performance autotest",
"Command" : "mempool_perf_autotest",
"Func" : default_autotest,
@@ -412,7 +418,7 @@ non_parallel_test_group_list = [
},
{
"Prefix" : "power",
- "Memory" : per_sockets(512),
+ "Memory" : "16",
"Tests" :
[
{
@@ -425,7 +431,7 @@ non_parallel_test_group_list = [
},
{
"Prefix" : "power_acpi_cpufreq",
- "Memory" : per_sockets(512),
+ "Memory" : "16",
"Tests" :
[
{
@@ -438,7 +444,7 @@ non_parallel_test_group_list = [
},
{
"Prefix" : "power_kvm_vm",
- "Memory" : "512",
+ "Memory" : "16",
"Tests" :
[
{
@@ -450,19 +456,6 @@ non_parallel_test_group_list = [
]
},
{
- "Prefix" : "lpm6",
- "Memory" : "512",
- "Tests" :
- [
- {
- "Name" : "LPM6 autotest",
- "Command" : "lpm6_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
"Prefix": "timer_perf",
"Memory" : per_sockets(512),
"Tests" :
diff --git a/app/test/autotest_test_funcs.py b/app/test/autotest_test_funcs.py
index 0f012f6b..14cffd01 100644
--- a/app/test/autotest_test_funcs.py
+++ b/app/test/autotest_test_funcs.py
@@ -83,7 +83,7 @@ def spinlock_autotest(child, test_name):
"Test Failed",
"Hello from core ([0-9]*) !",
"Hello from within recursive locks from ([0-9]*) !",
- pexpect.TIMEOUT], timeout = 20)
+ pexpect.TIMEOUT], timeout = 5)
# ok
if index == 0:
break
@@ -145,18 +145,10 @@ def logs_autotest(child, test_name):
child.sendline(test_name)
log_list = [
- "TESTAPP1: this is a debug level message",
- "TESTAPP1: this is a info level message",
- "TESTAPP1: this is a warning level message",
- "TESTAPP2: this is a info level message",
- "TESTAPP2: this is a warning level message",
- "TESTAPP1: this is a debug level message",
- "TESTAPP1: this is a debug level message",
- "TESTAPP1: this is a info level message",
- "TESTAPP1: this is a warning level message",
- "TESTAPP2: this is a info level message",
- "TESTAPP2: this is a warning level message",
- "TESTAPP1: this is a debug level message",
+ "TESTAPP1: error message",
+ "TESTAPP1: critical message",
+ "TESTAPP2: critical message",
+ "TESTAPP1: error message",
]
for log_msg in log_list:
@@ -181,9 +173,9 @@ def timer_autotest(child, test_name):
i = 0
child.sendline(test_name)
- index = child.expect(["Start timer stress tests \(20 seconds\)",
+ index = child.expect(["Start timer stress tests",
"Test Failed",
- pexpect.TIMEOUT], timeout = 10)
+ pexpect.TIMEOUT], timeout = 5)
if index == 1:
return -1, "Fail"
@@ -192,16 +184,16 @@ def timer_autotest(child, test_name):
index = child.expect(["Start timer stress tests 2",
"Test Failed",
- pexpect.TIMEOUT], timeout = 40)
+ pexpect.TIMEOUT], timeout = 5)
if index == 1:
return -1, "Fail"
elif index == 2:
return -1, "Fail [Timeout]"
- index = child.expect(["Start timer basic tests \(20 seconds\)",
+ index = child.expect(["Start timer basic tests",
"Test Failed",
- pexpect.TIMEOUT], timeout = 20)
+ pexpect.TIMEOUT], timeout = 5)
if index == 1:
return -1, "Fail"
@@ -281,7 +273,7 @@ def timer_autotest(child, test_name):
def ring_autotest(child, test_name):
child.sendline(test_name)
index = child.expect(["Test OK", "Test Failed",
- pexpect.TIMEOUT], timeout = 15)
+ pexpect.TIMEOUT], timeout = 2)
if index == 1:
return -1, "Fail"
elif index == 2:
diff --git a/app/test/commands.c b/app/test/commands.c
index 9cb9606e..2df46b05 100644
--- a/app/test/commands.c
+++ b/app/test/commands.c
@@ -150,8 +150,6 @@ static void cmd_dump_parsed(void *parsed_result,
rte_dump_physmem_layout(stdout);
else if (!strcmp(res->dump, "dump_memzone"))
rte_memzone_dump(stdout);
- else if (!strcmp(res->dump, "dump_log_history"))
- rte_log_dump_history(stdout);
else if (!strcmp(res->dump, "dump_struct_sizes"))
dump_struct_sizes();
else if (!strcmp(res->dump, "dump_ring"))
@@ -164,7 +162,7 @@ static void cmd_dump_parsed(void *parsed_result,
cmdline_parse_token_string_t cmd_dump_dump =
TOKEN_STRING_INITIALIZER(struct cmd_dump_result, dump,
- "dump_physmem#dump_memzone#dump_log_history#"
+ "dump_physmem#dump_memzone#"
"dump_struct_sizes#dump_ring#dump_mempool#"
"dump_devargs");
@@ -439,7 +437,7 @@ int commands_init(void)
commands_len += strlen(t->command) + 1;
}
- commands = malloc(commands_len);
+ commands = malloc(commands_len + 1);
if (!commands)
return -1;
diff --git a/app/test/resource.c b/app/test/resource.c
new file mode 100644
index 00000000..0e2b62cd
--- /dev/null
+++ b/app/test/resource.c
@@ -0,0 +1,305 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of RehiveTech nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h> /* malloc/free used by the TAR helpers below */
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_debug.h>
+
+#include "resource.h"
+
+struct resource_list resource_list = TAILQ_HEAD_INITIALIZER(resource_list);
+
+size_t resource_size(const struct resource *r)
+{
+ return r->end - r->begin;
+}
+
+const struct resource *resource_find(const char *name)
+{
+ struct resource *r;
+
+ TAILQ_FOREACH(r, &resource_list, next) {
+ RTE_VERIFY(r->name);
+
+ if (!strcmp(r->name, name))
+ return r;
+ }
+
+ return NULL;
+}
+
+int resource_fwrite(const struct resource *r, FILE *f)
+{
+ const size_t goal = resource_size(r);
+ size_t total = 0;
+
+ while (total < goal) {
+ size_t wlen = fwrite(r->begin + total, 1, goal - total, f);
+ if (wlen == 0) {
+ perror(__func__);
+ return -1;
+ }
+
+ total += wlen;
+ }
+
+ return 0;
+}
+
+int resource_fwrite_file(const struct resource *r, const char *fname)
+{
+ FILE *f;
+ int ret;
+
+ f = fopen(fname, "w");
+ if (f == NULL) {
+ perror(__func__);
+ return -1;
+ }
+
+ ret = resource_fwrite(r, f);
+ fclose(f);
+ return ret;
+}
+
+#ifdef RTE_APP_TEST_RESOURCE_TAR
+#include <archive.h>
+#include <archive_entry.h>
+
+static int do_copy(struct archive *r, struct archive *w)
+{
+ const void *buf;
+ size_t len;
+#if ARCHIVE_VERSION_NUMBER >= 3000000
+ int64_t off;
+#else
+ off_t off;
+#endif
+ int ret;
+
+ while (1) {
+ ret = archive_read_data_block(r, &buf, &len, &off);
+ if (ret == ARCHIVE_RETRY)
+ continue;
+
+ if (ret == ARCHIVE_EOF)
+ return 0;
+
+ if (ret != ARCHIVE_OK)
+ return ret;
+
+ do {
+ ret = archive_write_data_block(w, buf, len, off);
+ if (ret != ARCHIVE_OK && ret != ARCHIVE_RETRY)
+ return ret;
+ } while (ret != ARCHIVE_OK);
+ }
+}
+
+int resource_untar(const struct resource *res)
+{
+ struct archive *r;
+ struct archive *w;
+ struct archive_entry *e;
+ void *p;
+ int flags = 0;
+ int ret;
+
+ p = malloc(resource_size(res));
+ if (p == NULL)
+ rte_panic("Failed to malloc %zu B\n", resource_size(res));
+
+ memcpy(p, res->begin, resource_size(res));
+
+ r = archive_read_new();
+ if (r == NULL) {
+ free(p);
+ return -1;
+ }
+
+ archive_read_support_format_all(r);
+ archive_read_support_filter_all(r);
+
+ w = archive_write_disk_new();
+ if (w == NULL) {
+ archive_read_free(r);
+ free(p);
+ return -1;
+ }
+
+ flags |= ARCHIVE_EXTRACT_PERM;
+ flags |= ARCHIVE_EXTRACT_FFLAGS;
+ archive_write_disk_set_options(w, flags);
+ archive_write_disk_set_standard_lookup(w);
+
+ ret = archive_read_open_memory(r, p, resource_size(res));
+ if (ret != ARCHIVE_OK)
+ goto fail;
+
+ while (1) {
+ ret = archive_read_next_header(r, &e);
+ if (ret == ARCHIVE_EOF)
+ break;
+ if (ret != ARCHIVE_OK)
+ goto fail;
+
+ ret = archive_write_header(w, e);
+ if (ret == ARCHIVE_EOF)
+ break;
+ if (ret != ARCHIVE_OK)
+ goto fail;
+
+ if (archive_entry_size(e) == 0)
+ continue;
+
+ ret = do_copy(r, w);
+ if (ret != ARCHIVE_OK)
+ goto fail;
+
+ ret = archive_write_finish_entry(w);
+ if (ret != ARCHIVE_OK)
+ goto fail;
+ }
+
+ archive_write_free(w);
+ archive_read_free(r);
+ free(p);
+ return 0;
+
+fail:
+ /* read the error string before the archive handle is freed */
+ fprintf(stderr, "Failed: %s\n", archive_error_string(r));
+ archive_write_free(w);
+ archive_read_free(r);
+ free(p);
+ rte_panic("Failed to untar the resource\n");
+ return -1;
+}
+
+int resource_rm_by_tar(const struct resource *res)
+{
+ struct archive *r;
+ struct archive_entry *e;
+ void *p;
+ int try_again = 1;
+ int attempts = 0;
+ int ret;
+
+ p = malloc(resource_size(res));
+ if (p == NULL)
+ rte_panic("Failed to malloc %zu B\n", resource_size(res));
+
+ memcpy(p, res->begin, resource_size(res));
+
+ /*
+ * If somebody creates a file somewhere inside the extracted TAR
+ * hierarchy during a test, resource_rm_by_tar might loop
+ * infinitely. The attempts counter prevents this. In the normal
+ * case, at most N iterations are done, where N is the depth of
+ * the file hierarchy.
+ */
+ while (try_again && attempts < 10000) {
+ r = archive_read_new();
+ if (r == NULL) {
+ free(p);
+ return -1;
+ }
+
+ archive_read_support_format_all(r);
+ archive_read_support_filter_all(r);
+
+ ret = archive_read_open_memory(r, p, resource_size(res));
+ if (ret != ARCHIVE_OK) {
+ fprintf(stderr, "Failed: %s\n",
+ archive_error_string(r));
+ goto fail;
+ }
+
+ try_again = 0;
+
+ while (1) {
+ ret = archive_read_next_header(r, &e);
+ if (ret == ARCHIVE_EOF)
+ break;
+ if (ret != ARCHIVE_OK)
+ goto fail;
+
+ ret = remove(archive_entry_pathname(e));
+ if (ret < 0) {
+ switch (errno) {
+ case ENOTEMPTY:
+ case EEXIST:
+ try_again = 1;
+ break;
+
+ /* should not usually happen: */
+ case ENOENT:
+ case ENOTDIR:
+ case EROFS:
+ attempts += 1;
+ continue;
+ default:
+ perror("Failed to remove file");
+ goto fail;
+ }
+ }
+ }
+
+ archive_read_free(r);
+ attempts += 1;
+ }
+
+ if (attempts >= 10000) {
+ fprintf(stderr, "Failed to remove archive\n");
+ free(p);
+ return -1;
+ }
+
+ free(p);
+ return 0;
+
+fail:
+ /* read the error string before the archive handle is freed */
+ fprintf(stderr, "Failed: %s\n", archive_error_string(r));
+ archive_read_free(r);
+ free(p);
+
+ rte_panic("Failed to remove the extracted files\n");
+ return -1;
+}
+
+#endif /* RTE_APP_TEST_RESOURCE_TAR */
+
+void resource_register(struct resource *r)
+{
+ TAILQ_INSERT_TAIL(&resource_list, r, next);
+}
diff --git a/app/test/resource.h b/app/test/resource.h
new file mode 100644
index 00000000..1e961221
--- /dev/null
+++ b/app/test/resource.h
@@ -0,0 +1,135 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of RehiveTech nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RESOURCE_H_
+#define _RESOURCE_H_
+
+/**
+ * @file
+ *
+ * Test Resource API
+ *
+ * Each test can require and use some external resources. Usually, an external
+ * resource is a file or a filesystem sub-hierarchy. A resource is included
+ * inside the test executable.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stddef.h>
+
+#include <rte_eal.h>
+#include <rte_common.h>
+
+TAILQ_HEAD(resource_list, resource);
+extern struct resource_list resource_list;
+
+/**
+ * Representation of a resource. It points to the resource's binary data.
+ * The semantics of the binary data are defined by the target test.
+ */
+struct resource {
+ const char *name; /**< Unique name of the resource */
+ const char *begin; /**< Start of resource data */
+ const char *end; /**< End of resource data */
+ TAILQ_ENTRY(resource) next;
+};
+
+/**
+ * @return size of the given resource
+ */
+size_t resource_size(const struct resource *r);
+
+/**
+ * Find a resource by name in the global list of resources.
+ */
+const struct resource *resource_find(const char *name);
+
+/**
+ * Write the raw data of the resource to the given file.
+ * @return 0 on success
+ */
+int resource_fwrite(const struct resource *r, FILE *f);
+
+/**
+ * Write the raw data of the resource to the file given by name.
+ * The name is relative to the current working directory.
+ * @return 0 on success
+ */
+int resource_fwrite_file(const struct resource *r, const char *fname);
+
+/**
+ * Treat the given resource as a tar archive. Extract
+ * the archive to the current directory.
+ */
+int resource_untar(const struct resource *res);
+
+/**
+ * Treat the given resource as a tar archive. Remove
+ * all files (relative to the current directory) listed
+ * in the tar archive.
+ */
+int resource_rm_by_tar(const struct resource *res);
+
+/**
+ * Register a resource in the global list of resources.
+ * Not intended for direct use; see the REGISTER_RESOURCE
+ * macro instead.
+ */
+void resource_register(struct resource *r);
+
+/**
+ * Definition of a resource linked externally (by means of the toolchain).
+ * Only the base name of the resource is expected. The name refers to the
+ * linked pointers beg_<name> and end_<name> provided externally.
+ */
+#define REGISTER_LINKED_RESOURCE(n) \
+extern const char beg_ ##n; \
+extern const char end_ ##n; \
+REGISTER_RESOURCE(n, &beg_ ##n, &end_ ##n)
+
+/**
+ * Definition of a resource described by its name, and pointers begin, end.
+ */
+#define REGISTER_RESOURCE(n, b, e) \
+static struct resource linkres_ ##n = { \
+ .name = RTE_STR(n), \
+ .begin = b, \
+ .end = e, \
+}; \
+static void __attribute__((constructor, used)) resinitfn_ ##n(void) \
+{ \
+ resource_register(&linkres_ ##n); \
+}
+
+#endif
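
A minimal usage sketch of the API above, assuming the test_resource_c resource
registered by the Makefile rule earlier in this patch (the output file name
test_resource.out is illustrative):

    #include <stdio.h>
    #include "resource.h"

    /* pull in the beg_/end_ symbols created by the linked_resource rule */
    REGISTER_LINKED_RESOURCE(test_resource_c);

    static int
    test_resource_example(void)
    {
    	const struct resource *r = resource_find("test_resource_c");

    	if (r == NULL)
    		return -1;
    	printf("resource holds %zu bytes\n", resource_size(r));
    	/* write a copy next to the test binary */
    	return resource_fwrite_file(r, "test_resource.out");
    }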
diff --git a/app/test/test.h b/app/test/test.h
index a2fba607..81828bee 100644
--- a/app/test/test.h
+++ b/app/test/test.h
@@ -35,6 +35,7 @@
#define _TEST_H_
#include <stddef.h>
#include <sys/queue.h>
+#include "rte_log.h"
#define TEST_SUCCESS (0)
#define TEST_FAILED (-1)
@@ -64,7 +65,7 @@
} \
} while (0)
-
+/* Compare two buffers (length in bytes) */
#define TEST_ASSERT_BUFFERS_ARE_EQUAL(a, b, len, msg, ...) do { \
if (memcmp(a, b, len)) { \
printf("TestCase %s() line %d failed: " \
@@ -74,6 +75,61 @@
} \
} while (0)
+/* Compare two buffers with offset (length and offset in bytes) */
+#define TEST_ASSERT_BUFFERS_ARE_EQUAL_OFFSET(a, b, len, off, msg, ...) do { \
+ const uint8_t *_a_with_off = (const uint8_t *)a + off; \
+ const uint8_t *_b_with_off = (const uint8_t *)b + off; \
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(_a_with_off, _b_with_off, len, msg); \
+} while (0)
+
+/* Compare two buffers (length in bits) */
+#define TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(a, b, len, msg, ...) do { \
+ uint8_t _last_byte_a, _last_byte_b; \
+ uint8_t _last_byte_mask, _last_byte_bits; \
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(a, b, (len >> 3), msg); \
+ if (len % 8) { \
+ _last_byte_bits = len % 8; \
+ _last_byte_mask = ~((1 << (8 - _last_byte_bits)) - 1); \
+ _last_byte_a = ((const uint8_t *)a)[len >> 3]; \
+ _last_byte_b = ((const uint8_t *)b)[len >> 3]; \
+ _last_byte_a &= _last_byte_mask; \
+ _last_byte_b &= _last_byte_mask; \
+ if (_last_byte_a != _last_byte_b) { \
+ printf("TestCase %s() line %d failed: " \
+ msg "\n", __func__, __LINE__, ##__VA_ARGS__);\
+ TEST_TRACE_FAILURE(__FILE__, __LINE__, __func__); \
+ return TEST_FAILED; \
+ } \
+ } \
+} while (0)
+
+/* Compare two buffers with offset (length and offset in bits) */
+#define TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT_OFFSET(a, b, len, off, msg, ...) do { \
+ uint8_t _first_byte_a, _first_byte_b; \
+ uint8_t _first_byte_mask, _first_byte_bits; \
+ uint32_t _len_without_first_byte = (off % 8) ? \
+ len - (8 - (off % 8)) : \
+ len; \
+ uint32_t _off_in_bytes = (off % 8) ? (off >> 3) + 1 : (off >> 3); \
+ const uint8_t *_a_with_off = (const uint8_t *)a + _off_in_bytes; \
+ const uint8_t *_b_with_off = (const uint8_t *)b + _off_in_bytes; \
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(_a_with_off, _b_with_off, \
+ _len_without_first_byte, msg); \
+ if (off % 8) { \
+ _first_byte_bits = 8 - (off % 8); \
+ _first_byte_mask = (1 << _first_byte_bits) - 1; \
+ _first_byte_a = *(_a_with_off - 1); \
+ _first_byte_b = *(_b_with_off - 1); \
+ _first_byte_a &= _first_byte_mask; \
+ _first_byte_b &= _first_byte_mask; \
+ if (_first_byte_a != _first_byte_b) { \
+ printf("TestCase %s() line %d failed: " \
+ msg "\n", __func__, __LINE__, ##__VA_ARGS__); \
+ TEST_TRACE_FAILURE(__FILE__, __LINE__, __func__); \
+ return TEST_FAILED; \
+ } \
+ } \
+} while (0)
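
To make the last-byte masking above concrete, here is a stand-alone sketch of
the same bit-length comparison (the buffer contents and the 13-bit length are
hypothetical example values):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Compare the first 'len' bits of two buffers: whole bytes first,
     * then the top (len % 8) bits of the trailing partial byte. */
    static int
    buffers_equal_bit(const uint8_t *a, const uint8_t *b, uint32_t len)
    {
    	uint32_t full = len >> 3;	/* complete bytes */
    	uint8_t bits = len % 8;		/* leftover bits */

    	if (memcmp(a, b, full))
    		return 0;
    	if (bits) {
    		/* keep only the most significant 'bits' bits */
    		uint8_t mask = ~((1 << (8 - bits)) - 1);

    		if ((a[full] & mask) != (b[full] & mask))
    			return 0;
    	}
    	return 1;
    }

    int
    main(void)
    {
    	/* 13 bits: one full byte plus the top 5 bits of the next byte */
    	const uint8_t x[] = { 0xAB, 0xC8 };
    	const uint8_t y[] = { 0xAB, 0xCF };	/* differs only in the low 3 bits */

    	printf("%d\n", buffers_equal_bit(x, y, 13));	/* prints 1 */
    	return 0;
    }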
#define TEST_ASSERT_NOT_EQUAL(a, b, msg, ...) do { \
if (!(a != b)) { \
@@ -150,6 +206,12 @@ struct unit_test_case {
#define TEST_CASES_END() { NULL, NULL, NULL, NULL, NULL, 0 }
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define TEST_HEXDUMP(file, title, buf, len) rte_hexdump(file, title, buf, len)
+#else
+#define TEST_HEXDUMP(file, title, buf, len) do {} while (0)
+#endif
+
struct unit_test_suite {
const char *suite_name;
int (*setup)(void);
diff --git a/app/test/test_alarm.c b/app/test/test_alarm.c
index 5d6f4a27..d83591c9 100644
--- a/app/test/test_alarm.c
+++ b/app/test/test_alarm.c
@@ -45,8 +45,8 @@
#define US_PER_MS 1000
-#define RTE_TEST_ALARM_TIMEOUT 3000 /* ms */
-#define RTE_TEST_CHECK_PERIOD 1000 /* ms */
+#define RTE_TEST_ALARM_TIMEOUT 10 /* ms */
+#define RTE_TEST_CHECK_PERIOD 3 /* ms */
static volatile int flag;
@@ -100,17 +100,17 @@ test_multi_alarms(void)
printf("Expect 6 callbacks in order...\n");
/* add two alarms in order */
- rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
- rte_eal_alarm_set(2000 * US_PER_MS, test_multi_cb, (void *)2);
+ rte_eal_alarm_set(10 * US_PER_MS, test_multi_cb, (void *)1);
+ rte_eal_alarm_set(20 * US_PER_MS, test_multi_cb, (void *)2);
/* now add in reverse order */
- rte_eal_alarm_set(6000 * US_PER_MS, test_multi_cb, (void *)6);
- rte_eal_alarm_set(5000 * US_PER_MS, test_multi_cb, (void *)5);
- rte_eal_alarm_set(4000 * US_PER_MS, test_multi_cb, (void *)4);
- rte_eal_alarm_set(3000 * US_PER_MS, test_multi_cb, (void *)3);
+ rte_eal_alarm_set(60 * US_PER_MS, test_multi_cb, (void *)6);
+ rte_eal_alarm_set(50 * US_PER_MS, test_multi_cb, (void *)5);
+ rte_eal_alarm_set(40 * US_PER_MS, test_multi_cb, (void *)4);
+ rte_eal_alarm_set(30 * US_PER_MS, test_multi_cb, (void *)3);
/* wait for expiry */
- rte_delay_ms(6500);
+ rte_delay_ms(65);
if (cb_count.cnt != 6) {
printf("Missing callbacks\n");
/* remove any callbacks that might remain */
@@ -121,12 +121,12 @@ test_multi_alarms(void)
cb_count.cnt = 0;
printf("Expect only callbacks with args 1 and 3...\n");
/* Add 3 flags, then delete one */
- rte_eal_alarm_set(3000 * US_PER_MS, test_multi_cb, (void *)3);
- rte_eal_alarm_set(2000 * US_PER_MS, test_multi_cb, (void *)2);
- rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
+ rte_eal_alarm_set(30 * US_PER_MS, test_multi_cb, (void *)3);
+ rte_eal_alarm_set(20 * US_PER_MS, test_multi_cb, (void *)2);
+ rte_eal_alarm_set(10 * US_PER_MS, test_multi_cb, (void *)1);
rm_count = rte_eal_alarm_cancel(test_multi_cb, (void *)2);
- rte_delay_ms(3500);
+ rte_delay_ms(35);
if (cb_count.cnt != 2 || rm_count != 1) {
printf("Error: invalid flags count or alarm removal failure"
" - flags value = %d, expected = %d\n",
@@ -138,9 +138,9 @@ test_multi_alarms(void)
printf("Testing adding and then removing multiple alarms\n");
/* finally test that no callbacks are called if we delete them all*/
- rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
- rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)2);
- rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)3);
+ rte_eal_alarm_set(10 * US_PER_MS, test_multi_cb, (void *)1);
+ rte_eal_alarm_set(10 * US_PER_MS, test_multi_cb, (void *)2);
+ rte_eal_alarm_set(10 * US_PER_MS, test_multi_cb, (void *)3);
rm_count = rte_eal_alarm_cancel(test_alarm_callback, (void *)-1);
if (rm_count != 0) {
printf("Error removing non-existant alarm succeeded\n");
@@ -157,19 +157,19 @@ test_multi_alarms(void)
* Also test that we can cancel head-of-line callbacks ok.*/
flag = 0;
recursive_error = 0;
- rte_eal_alarm_set(1000 * US_PER_MS, test_remove_in_callback, (void *)1);
- rte_eal_alarm_set(2000 * US_PER_MS, test_remove_in_callback, (void *)2);
+ rte_eal_alarm_set(10 * US_PER_MS, test_remove_in_callback, (void *)1);
+ rte_eal_alarm_set(20 * US_PER_MS, test_remove_in_callback, (void *)2);
rm_count = rte_eal_alarm_cancel(test_remove_in_callback, (void *)1);
if (rm_count != 1) {
printf("Error cancelling head-of-list callback\n");
return -1;
}
- rte_delay_ms(1500);
+ rte_delay_ms(15);
if (flag != 0) {
printf("Error, cancelling head-of-list leads to premature callback\n");
return -1;
}
- rte_delay_ms(1000);
+ rte_delay_ms(10);
if (flag != 2) {
printf("Error - expected callback not called\n");
rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1);
@@ -181,10 +181,10 @@ test_multi_alarms(void)
/* Check if it can cancel all for the same callback */
printf("Testing canceling all for the same callback\n");
flag_2 = 0;
- rte_eal_alarm_set(1000 * US_PER_MS, test_remove_in_callback, (void *)1);
- rte_eal_alarm_set(2000 * US_PER_MS, test_remove_in_callback_2, (void *)2);
- rte_eal_alarm_set(3000 * US_PER_MS, test_remove_in_callback_2, (void *)3);
- rte_eal_alarm_set(4000 * US_PER_MS, test_remove_in_callback, (void *)4);
+ rte_eal_alarm_set(10 * US_PER_MS, test_remove_in_callback, (void *)1);
+ rte_eal_alarm_set(20 * US_PER_MS, test_remove_in_callback_2, (void *)2);
+ rte_eal_alarm_set(30 * US_PER_MS, test_remove_in_callback_2, (void *)3);
+ rte_eal_alarm_set(40 * US_PER_MS, test_remove_in_callback, (void *)4);
rm_count = rte_eal_alarm_cancel(test_remove_in_callback_2, (void *)-1);
if (rm_count != 2) {
printf("Error, cannot cancel all for the same callback\n");
diff --git a/app/test/test_cmdline_string.c b/app/test/test_cmdline_string.c
index 915a7d76..c5bb9c0c 100644
--- a/app/test/test_cmdline_string.c
+++ b/app/test/test_cmdline_string.c
@@ -35,6 +35,7 @@
#include <string.h>
#include <inttypes.h>
+#include <rte_common.h>
#include <rte_string_fns.h>
#include <cmdline_parse.h>
@@ -65,9 +66,10 @@ struct string_elt_str string_elt_strs[] = {
{"one#two\nwith\nnewlines#three", "two\nwith\nnewlines", 1},
};
-#if CMDLINE_TEST_BUFSIZE < STR_TOKEN_SIZE
+#if (CMDLINE_TEST_BUFSIZE < STR_TOKEN_SIZE) \
+|| (CMDLINE_TEST_BUFSIZE < STR_MULTI_TOKEN_SIZE)
#undef CMDLINE_TEST_BUFSIZE
-#define CMDLINE_TEST_BUFSIZE STR_TOKEN_SIZE
+#define CMDLINE_TEST_BUFSIZE RTE_MAX(STR_TOKEN_SIZE, STR_MULTI_TOKEN_SIZE)
#endif
struct string_nb_str {
@@ -97,6 +99,11 @@ struct string_parse_str string_parse_strs[] = {
{"two with\rgarbage\tcharacters\n",
"one#two with\rgarbage\tcharacters\n#three",
"two with\rgarbage\tcharacters\n"},
+ {"one two", "one", "one"}, /* fixed string */
+ {"one two", TOKEN_STRING_MULTI, "one two"}, /* multi string */
+ {"one two", NULL, "one"}, /* any string */
+ {"one two #three", TOKEN_STRING_MULTI, "one two "},
+ /* multi string with comment */
};
@@ -124,7 +131,6 @@ struct string_invalid_str string_invalid_strs[] = {
"toolong!!!toolong!!!toolong!!!toolong!!!toolong!!!toolong!!!"
"toolong!!!toolong!!!toolong!!!toolong!!!toolong!!!toolong!!!"
"toolong!!!" },
- {"invalid", ""},
{"", "invalid"}
};
@@ -350,8 +356,7 @@ test_parse_string_valid(void)
string_parse_strs[i].str, help_str);
return -1;
}
- if (strncmp(buf, string_parse_strs[i].result,
- sizeof(string_parse_strs[i].result) - 1) != 0) {
+ if (strcmp(buf, string_parse_strs[i].result) != 0) {
printf("Error: result mismatch!\n");
return -1;
}
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 8e8da988..fbfe1d0d 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -42,6 +42,10 @@
#include "test.h"
#include "test_cryptodev.h"
+
+#include "test_cryptodev_aes.h"
+#include "test_cryptodev_kasumi_test_vectors.h"
+#include "test_cryptodev_kasumi_hash_test_vectors.h"
#include "test_cryptodev_snow3g_test_vectors.h"
#include "test_cryptodev_snow3g_hash_test_vectors.h"
#include "test_cryptodev_gcm_test_vectors.h"
@@ -109,35 +113,20 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-static int
-setup_oop_test_mbufs(struct rte_mbuf **ibuf, struct rte_mbuf **obuf,
- struct rte_mempool *mpool, const char *string, size_t len,
- uint8_t blocksize) {
- *ibuf = setup_test_string(mpool, string, len, blocksize);
- if (*ibuf == NULL)
- return -(EFAULT);
- *obuf = setup_test_string(mpool, NULL, len, blocksize);
- if (*obuf == NULL)
- return -(EFAULT);
- return 0;
-}
-
-#if HEX_DUMP
-static void
-hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
+/* Get the number of bytes needed to hold X bits (rounding up) */
+static uint32_t
+ceil_byte_length(uint32_t num_bits)
{
- rte_hexdump(f, title, rte_pktmbuf_mtod(m, const void *), m->data_len);
+ if (num_bits % 8)
+ return ((num_bits >> 3) + 1);
+ else
+ return (num_bits >> 3);
}
-#endif
static struct rte_crypto_op *
process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
-#if HEX_DUMP
- hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
-#endif
-
if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
@@ -148,11 +137,6 @@ process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
- if (obuf)
- hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
-#endif
-
return op;
}
@@ -241,6 +225,20 @@ testsuite_setup(void)
}
}
+ /* Create 2 KASUMI devices if required */
+ if (gbl_cryptodev_type == RTE_CRYPTODEV_KASUMI_PMD) {
+ nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_KASUMI_PMD);
+ if (nb_devs < 2) {
+ for (i = nb_devs; i < 2; i++) {
+ TEST_ASSERT_SUCCESS(rte_eal_vdev_init(
+ CRYPTODEV_NAME_KASUMI_PMD, NULL),
+ "Failed to create instance %u of"
+ " pmd : %s",
+ i, CRYPTODEV_NAME_KASUMI_PMD);
+ }
+ }
+ }
+
/* Create 2 NULL devices if required */
if (gbl_cryptodev_type == RTE_CRYPTODEV_NULL_PMD) {
nb_devs = rte_cryptodev_count_devtype(
@@ -318,12 +316,12 @@ testsuite_teardown(void)
if (ts_params->mbuf_pool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_MBUFPOOL count %u\n",
- rte_mempool_count(ts_params->mbuf_pool));
+ rte_mempool_avail_count(ts_params->mbuf_pool));
}
if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->op_mpool));
+ rte_mempool_avail_count(ts_params->op_mpool));
}
}
@@ -398,10 +396,13 @@ ut_teardown(void)
/*
* free mbuf - both obuf and ibuf are usually the same,
- * but rte copes even if we call free twice
+ * so checking whether they point at the same address is necessary
+ * to avoid freeing the mbuf twice.
*/
if (ut_params->obuf) {
rte_pktmbuf_free(ut_params->obuf);
+ if (ut_params->ibuf == ut_params->obuf)
+ ut_params->ibuf = 0;
ut_params->obuf = 0;
}
if (ut_params->ibuf) {
@@ -411,7 +412,7 @@ ut_teardown(void)
if (ts_params->mbuf_pool != NULL)
RTE_LOG(DEBUG, USER1, "CRYPTO_MBUFPOOL count %u\n",
- rte_mempool_count(ts_params->mbuf_pool));
+ rte_mempool_avail_count(ts_params->mbuf_pool));
rte_cryptodev_stats_get(ts_params->valid_devs[0], &stats);
@@ -922,661 +923,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
return TEST_SUCCESS;
}
-
-static int
-test_AES_CBC_HMAC_SHA1_encrypt_digest_oop(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and space for digest */
-
- TEST_ASSERT_EQUAL(setup_oop_test_mbufs(&ut_params->ibuf,
- &ut_params->obuf, ts_params->mbuf_pool, catch_22_quote,
- QUOTE_512_BYTES, 0), 0,
- "Allocation of rte_mbuf failed");
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->obuf,
- DIGEST_BYTE_LENGTH_SHA1);
-
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = &ut_params->auth_xform;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = NULL;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->auth_xform.auth.key.data = hmac_sha1_key;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
-
- ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0],
- &ut_params->cipher_xform);
-
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
-
-
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
- sym_op->m_dst = ut_params->obuf;
-
- sym_op->auth.digest.data = ut_params->digest;
-
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->obuf, QUOTE_512_BYTES);
-
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
- /* Set crypto operation cipher parameters */
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
-
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data,
- "Failed to prepend place for iv input");
-
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_prepend(ut_params->obuf,
- CIPHER_IV_LENGTH_AES_CBC),
- "Failed to prepend place for iv output");
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
-
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
-
- /* Process crypto operation */
- TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
- ut_params->op), "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto op processing failed");
-
- /* Validate obuf */
- uint8_t *ciphertext;
-
- ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_dst,
- uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
-
- TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES,
- "ciphertext data not as expected");
-
- uint8_t *digest = ciphertext + QUOTE_512_BYTES;
-
- TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
- catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
- gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
- TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
- DIGEST_BYTE_LENGTH_SHA1,
- "Generated digest data not as expected");
-
-
- return TEST_SUCCESS;
-}
-
-
-static int
-test_AES_CBC_HMAC_SHA1_decrypt_digest_oop_ver(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and digest */
-
- TEST_ASSERT_EQUAL(setup_oop_test_mbufs(&ut_params->ibuf,
- &ut_params->obuf, ts_params->mbuf_pool,
- (const char *)
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES, 0), 0,
- "Allocation of rte_mbuf failed");
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_SHA1);
-
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
- DIGEST_BYTE_LENGTH_SHA1);
-
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = NULL;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = &ut_params->cipher_xform;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->auth_xform.auth.key.data = hmac_sha1_key;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
-
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
- /* attach symmetric crypto session to crypto operations */
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
- sym_op->m_dst = ut_params->obuf;
-
- sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
-
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
-
- TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data,
- "Failed to prepend place for iv input");
-
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_prepend(ut_params->obuf,
- CIPHER_IV_LENGTH_AES_CBC),
- "Failed to prepend place for iv output");
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
-
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
-
- /* Process crypto operation */
- TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
- ut_params->op), "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "Digest verification failed");
-
- ut_params->obuf = ut_params->op->sym->m_dst;
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
- catch_22_quote,
- QUOTE_512_BYTES,
- "Ciphertext data not as expected");
-
-
- return TEST_SUCCESS;
-}
-
-
-static int
-test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and space for digest */
- ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
- catch_22_quote, QUOTE_512_BYTES, 0);
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_SHA1);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
- TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(ut_params->op, 2),
- "failed to allocate space for crypto transforms");
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
-
- /* Set crypto operation data parameters */
- sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-
- /* cipher parameters */
- sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- sym_op->xform->cipher.key.data = aes_cbc_key;
- sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* hash parameters */
- sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
-
- sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- sym_op->xform->next->auth.key.data = hmac_sha1_key;
- sym_op->xform->next->auth.digest_length =
- DIGEST_BYTE_LENGTH_SHA1;
-
- sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
-
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
- /* Process crypto operation */
- TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
- ut_params->op), "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto op processing failed");
-
- ut_params->obuf = ut_params->op->sym->m_src;
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES,
- "Ciphertext data not as expected");
-
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
- catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
- gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
- TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
- DIGEST_BYTE_LENGTH_SHA1,
- "Generated digest data not as expected");
-
-
- return TEST_SUCCESS;
-}
-
-static int
-test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and digest */
- ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
- (const char *)
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES, 0);
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_SHA1);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
- DIGEST_BYTE_LENGTH_SHA1);
-
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = NULL;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = &ut_params->cipher_xform;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->auth_xform.auth.key.data = hmac_sha1_key;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
-
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
- /* attach symmetric crypto session to crypto operations */
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
-
- sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
-
- /* Process crypto operation */
- TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
- ut_params->op), "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto op processing failed");
-
- ut_params->obuf = ut_params->op->sym->m_src;
-
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
- catch_22_quote,
- QUOTE_512_BYTES,
-		"Plaintext data not as expected");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "Digest verification failed");
-
-
- return TEST_SUCCESS;
-}
-
-
-/* ***** AES-CBC / HMAC-SHA256 Hash Tests ***** */
-
-#define HMAC_KEY_LENGTH_SHA256 (DIGEST_BYTE_LENGTH_SHA256)
-
-static uint8_t hmac_sha256_key[] = {
- 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
- 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
- 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
- 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60 };
-
-static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA256_digest[] = {
- 0xc8, 0x57, 0x57, 0x31, 0x03, 0xe0, 0x03, 0x55,
- 0x07, 0xc8, 0x9e, 0x7f, 0x48, 0x9a, 0x61, 0x9a,
- 0x68, 0xee, 0x03, 0x0e, 0x71, 0x75, 0xc7, 0xf4,
- 0x2e, 0x45, 0x26, 0x32, 0x7c, 0x12, 0x15, 0x15 };
-
-static int
-test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and space for digest */
- ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
- catch_22_quote, QUOTE_512_BYTES, 0);
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = &ut_params->auth_xform;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = NULL;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
- ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA256;
- ut_params->auth_xform.auth.key.data = hmac_sha256_key;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
-
- /* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0],
- &ut_params->cipher_xform);
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
-
- sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
- /* Process crypto operation */
- TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
- ut_params->op), "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto op processing failed");
-
- ut_params->obuf = ut_params->op->sym->m_src;
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES,
- "Ciphertext data not as expected");
-
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
- catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA256_digest,
- gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
- TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 :
- DIGEST_BYTE_LENGTH_SHA256,
- "Generated digest data not as expected");
-
-
- return TEST_SUCCESS;
-}
-
-static int
-test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and digest */
- ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
- (const char *)
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES, 0);
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA256_digest,
- DIGEST_BYTE_LENGTH_SHA256);
-
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = NULL;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = &ut_params->cipher_xform;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
- ut_params->auth_xform.auth.key.data = hmac_sha256_key;
- ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA256;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
-
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
-
- /* Set crypto operation data parameters */
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
-
- sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
- /* Process crypto operation */
- TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
- ut_params->op), "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto op processing failed");
-
- ut_params->obuf = ut_params->op->sym->m_src;
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC, catch_22_quote,
- QUOTE_512_BYTES,
- "Plaintext data not as expected");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "Digest verification failed");
-
- return TEST_SUCCESS;
-}
-
/* ***** AES-CBC / HMAC-SHA512 Hash Tests ***** */
#define HMAC_KEY_LENGTH_SHA512 (DIGEST_BYTE_LENGTH_SHA512)
@@ -1601,106 +947,6 @@ static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA512_digest[] = {
0x21, 0x96, 0x65, 0x93, 0x74, 0x43, 0x2A, 0x1D,
0x2E, 0xBF, 0xC2, 0xC2, 0xEE, 0xCC, 0x2F, 0x0A };
-static int
-test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and space for digest */
- ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
- catch_22_quote, QUOTE_512_BYTES, 0);
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_SHA512);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = &ut_params->auth_xform;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = NULL;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
- ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA512;
- ut_params->auth_xform.auth.key.data = hmac_sha512_key;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA512;
-
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->cipher_xform);
-
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
-
- sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
- /* Process crypto operation */
- TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
- ut_params->op), "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto op processing failed");
-
- ut_params->obuf = ut_params->op->sym->m_src;
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES,
- "Ciphertext data not as expected");
-
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
- catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA512_digest,
- gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
- TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 :
- DIGEST_BYTE_LENGTH_SHA512,
- "Generated digest data not as expected");
-
- return TEST_SUCCESS;
-}
static int
@@ -1712,25 +958,6 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
struct crypto_unittest_params *ut_params,
struct crypto_testsuite_params *ts_params);
-static int
-test_AES_CBC_HMAC_SHA512_decrypt_digest_verify(void)
-{
- struct crypto_unittest_params *ut_params = &unittest_params;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- TEST_ASSERT(test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
- ut_params) == TEST_SUCCESS,
- "Failed to create session params");
-
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
- return test_AES_CBC_HMAC_SHA512_decrypt_perform(ut_params->sess,
- ut_params, ts_params);
-}
static int
test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
@@ -1835,241 +1062,58 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
return TEST_SUCCESS;
}
-/* ***** AES-CBC / HMAC-AES_XCBC Chain Tests ***** */
-
-static uint8_t aes_cbc_hmac_aes_xcbc_key[] = {
- 0x87, 0x61, 0x54, 0x53, 0xC4, 0x6D, 0xDD, 0x51,
- 0xE1, 0x9F, 0x86, 0x64, 0x39, 0x0A, 0xE6, 0x59
- };
-
-static const uint8_t catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest[] = {
- 0xE0, 0xAC, 0x9A, 0xC4, 0x22, 0x64, 0x35, 0x89,
- 0x77, 0x1D, 0x8B, 0x75
- };
-
static int
-test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
+test_AES_mb_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and space for digest */
- ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
- catch_22_quote, QUOTE_512_BYTES, 0);
-
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = &ut_params->auth_xform;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = NULL;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
- ut_params->auth_xform.auth.key.length = AES_XCBC_MAC_KEY_SZ;
- ut_params->auth_xform.auth.key.data = aes_cbc_hmac_aes_xcbc_key;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
-
- /* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0],
- &ut_params->cipher_xform);
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
+ int status;
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+ status = test_AES_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_AESNI_MB_PMD);
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
-
- /* Set operation cipher parameters */
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- sym_op->m_src, CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(sym_op->m_src);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
- /* Set operation authentication parameters */
- sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
- sym_op->m_src, DIGEST_BYTE_LENGTH_AES_XCBC);
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- sym_op->m_src,
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
-
- memset(sym_op->auth.digest.data, 0, DIGEST_BYTE_LENGTH_AES_XCBC);
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
-
- /* Process crypto operation */
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
- TEST_ASSERT_NOT_NULL(ut_params->op, "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto op processing failed");
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
- uint8_t *, CIPHER_IV_LENGTH_AES_CBC),
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES,
- "Ciphertext data not as expected");
-
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod_offset(
- ut_params->op->sym->m_src, uint8_t *,
- CIPHER_IV_LENGTH_AES_CBC +
- QUOTE_512_BYTES),
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC,
- "Generated digest data not as expected");
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
return TEST_SUCCESS;
}
static int
-test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
+test_AES_qat_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
-
- /* Generate test mbuf data and space for digest */
- ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
- (const char *)catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES, 0);
-
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = NULL;
-
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = &ut_params->cipher_xform;
-
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
- ut_params->auth_xform.auth.key.length = AES_XCBC_MAC_KEY_SZ;
- ut_params->auth_xform.auth.key.data = aes_cbc_hmac_aes_xcbc_key;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_AES_XCBC;
-
- /* Create Crypto session*/
- ut_params->sess =
- rte_cryptodev_sym_session_create(ts_params->valid_devs[0],
- &ut_params->auth_xform);
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+ int status;
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
- /* Set crypto operation data parameters */
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
+ status = test_AES_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_QAT_SYM_PMD);
-
- sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
- ut_params->ibuf, DIGEST_BYTE_LENGTH_AES_XCBC);
- TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
- "no room to append digest");
-
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_AES_XCBC;
-
- rte_memcpy(sym_op->auth.digest.data,
- catch_22_quote_2_512_bytes_HMAC_AES_XCBC_digest,
- DIGEST_BYTE_LENGTH_AES_XCBC);
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
- /* Process crypto operation */
- TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
- ut_params->op), "failed to process sym crypto op");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto op processing failed");
-
- ut_params->obuf = ut_params->op->sym->m_src;
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC, catch_22_quote,
- QUOTE_512_BYTES,
-		"Plaintext data not as expected");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "Digest verification failed");
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
return TEST_SUCCESS;
}
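+/*
+ * Both wrappers above run the same shared AES test list through
+ * test_AES_all_tests(); only the target PMD type differs.
+ */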
/* ***** Snow3G Tests ***** */
static int
-create_snow3g_hash_session(uint8_t dev_id,
+create_snow3g_kasumi_hash_session(uint8_t dev_id,
const uint8_t *key, const uint8_t key_len,
const uint8_t aad_len, const uint8_t auth_len,
- enum rte_crypto_auth_operation op)
+ enum rte_crypto_auth_operation op,
+ enum rte_crypto_auth_algorithm algo)
{
uint8_t hash_key[key_len];
struct crypto_unittest_params *ut_params = &unittest_params;
memcpy(hash_key, key, key_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "key:", key, key_len);
-#endif
+
+ TEST_HEXDUMP(stdout, "key:", key, key_len);
+
/* Setup Authentication Parameters */
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = op;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
+ ut_params->auth_xform.auth.algo = algo;
ut_params->auth_xform.auth.key.length = key_len;
ut_params->auth_xform.auth.key.data = hash_key;
ut_params->auth_xform.auth.digest_length = auth_len;
@@ -2079,9 +1123,11 @@ create_snow3g_hash_session(uint8_t dev_id,
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
return 0;
}
+
static int
-create_snow3g_cipher_session(uint8_t dev_id,
+create_snow3g_kasumi_cipher_session(uint8_t dev_id,
enum rte_crypto_cipher_operation op,
+ enum rte_crypto_cipher_algorithm algo,
const uint8_t *key, const uint8_t key_len)
{
uint8_t cipher_key[key_len];
@@ -2094,14 +1140,13 @@ create_snow3g_cipher_session(uint8_t dev_id,
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
+ ut_params->cipher_xform.cipher.algo = algo;
ut_params->cipher_xform.cipher.op = op;
ut_params->cipher_xform.cipher.key.data = cipher_key;
ut_params->cipher_xform.cipher.key.length = key_len;
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "key:", key, key_len);
-#endif
+ TEST_HEXDUMP(stdout, "key:", key, key_len);
+
/* Create Crypto session */
ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
&ut_params->
@@ -2111,9 +1156,10 @@ create_snow3g_cipher_session(uint8_t dev_id,
}
static int
-create_snow3g_cipher_operation(const uint8_t *iv, const unsigned iv_len,
+create_snow3g_kasumi_cipher_operation(const uint8_t *iv, const unsigned iv_len,
const unsigned cipher_len,
- const unsigned cipher_offset)
+ const unsigned cipher_offset,
+ enum rte_crypto_cipher_algorithm algo)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -2134,7 +1180,11 @@ create_snow3g_cipher_operation(const uint8_t *iv, const unsigned iv_len,
sym_op->m_src = ut_params->ibuf;
/* iv */
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
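+	/* KASUMI F8 takes a 64-bit (8-byte) IV, while SNOW3G UEA2 takes a
+	 * 128-bit (16-byte) one, so the prepended IV is padded to the
+	 * matching block size.
+	 */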
+ if (algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
+ else
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+
sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf
, iv_pad_len);
@@ -2151,9 +1201,10 @@ create_snow3g_cipher_operation(const uint8_t *iv, const unsigned iv_len,
}
static int
-create_snow3g_cipher_operation_oop(const uint8_t *iv, const unsigned iv_len,
+create_snow3g_kasumi_cipher_operation_oop(const uint8_t *iv, const uint8_t iv_len,
const unsigned cipher_len,
- const unsigned cipher_offset)
+ const unsigned cipher_offset,
+ enum rte_crypto_cipher_algorithm algo)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -2175,9 +1226,12 @@ create_snow3g_cipher_operation_oop(const uint8_t *iv, const unsigned iv_len,
sym_op->m_dst = ut_params->obuf;
/* iv */
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf
- , iv_pad_len);
+ if (algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
+ else
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ iv_pad_len);
TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
@@ -2192,11 +1246,14 @@ create_snow3g_cipher_operation_oop(const uint8_t *iv, const unsigned iv_len,
}
static int
-create_snow3g_cipher_auth_session(uint8_t dev_id,
+create_snow3g_kasumi_cipher_auth_session(uint8_t dev_id,
enum rte_crypto_cipher_operation cipher_op,
enum rte_crypto_auth_operation auth_op,
+ enum rte_crypto_auth_algorithm auth_algo,
+ enum rte_crypto_cipher_algorithm cipher_algo,
const uint8_t *key, const uint8_t key_len,
const uint8_t aad_len, const uint8_t auth_len)
{
uint8_t cipher_auth_key[key_len];
@@ -2209,7 +1266,7 @@ create_snow3g_cipher_auth_session(uint8_t dev_id,
ut_params->auth_xform.next = NULL;
ut_params->auth_xform.auth.op = auth_op;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
+ ut_params->auth_xform.auth.algo = auth_algo;
ut_params->auth_xform.auth.key.length = key_len;
/* Hash key = cipher key */
ut_params->auth_xform.auth.key.data = cipher_auth_key;
@@ -2220,14 +1277,13 @@ create_snow3g_cipher_auth_session(uint8_t dev_id,
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = &ut_params->auth_xform;
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
+ ut_params->cipher_xform.cipher.algo = cipher_algo;
ut_params->cipher_xform.cipher.op = cipher_op;
ut_params->cipher_xform.cipher.key.data = cipher_auth_key;
ut_params->cipher_xform.cipher.key.length = key_len;
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "key:", key, key_len);
-#endif
+ TEST_HEXDUMP(stdout, "key:", key, key_len);
+
/* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
&ut_params->cipher_xform);
@@ -2237,12 +1293,14 @@ create_snow3g_cipher_auth_session(uint8_t dev_id,
}
static int
-create_snow3g_auth_cipher_session(uint8_t dev_id,
+create_snow3g_kasumi_auth_cipher_session(uint8_t dev_id,
enum rte_crypto_cipher_operation cipher_op,
enum rte_crypto_auth_operation auth_op,
+ enum rte_crypto_auth_algorithm auth_algo,
+ enum rte_crypto_cipher_algorithm cipher_algo,
const uint8_t *key, const uint8_t key_len,
const uint8_t aad_len, const uint8_t auth_len)
- {
+{
uint8_t auth_cipher_key[key_len];
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -2253,7 +1311,7 @@ create_snow3g_auth_cipher_session(uint8_t dev_id,
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.auth.op = auth_op;
ut_params->auth_xform.next = &ut_params->cipher_xform;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
+ ut_params->auth_xform.auth.algo = auth_algo;
ut_params->auth_xform.auth.key.length = key_len;
ut_params->auth_xform.auth.key.data = auth_cipher_key;
ut_params->auth_xform.auth.digest_length = auth_len;
@@ -2262,14 +1320,13 @@ create_snow3g_auth_cipher_session(uint8_t dev_id,
/* Setup Cipher Parameters */
ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
ut_params->cipher_xform.next = NULL;
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
+ ut_params->cipher_xform.cipher.algo = cipher_algo;
ut_params->cipher_xform.cipher.op = cipher_op;
ut_params->cipher_xform.cipher.key.data = auth_cipher_key;
ut_params->cipher_xform.cipher.key.length = key_len;
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "key:", key, key_len);
-#endif
+ TEST_HEXDUMP(stdout, "key:", key, key_len);
+
/* Create Crypto session*/
ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
&ut_params->auth_xform);
@@ -2280,11 +1337,12 @@ create_snow3g_auth_cipher_session(uint8_t dev_id,
}
static int
-create_snow3g_hash_operation(const uint8_t *auth_tag,
+create_snow3g_kasumi_hash_operation(const uint8_t *auth_tag,
const unsigned auth_tag_len,
const uint8_t *aad, const unsigned aad_len,
unsigned data_pad_len,
enum rte_crypto_auth_operation op,
+ enum rte_crypto_auth_algorithm algo,
const unsigned auth_len, const unsigned auth_offset)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -2313,9 +1371,12 @@ create_snow3g_hash_operation(const uint8_t *auth_tag,
* The cryptodev API calls out -
* - the array must be big enough to hold the AAD, plus any
* space to round this up to the nearest multiple of the
- * block size (16 bytes).
+ * block size (8 bytes for KASUMI and 16 bytes for SNOW3G).
*/
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+ if (algo == RTE_CRYPTO_AUTH_KASUMI_F9)
+ aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 8);
+ else
+ aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
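+	/* e.g. an aad_len of 5 (illustrative value only) would round up
+	 * to 8 for KASUMI F9 and to 16 for SNOW3G UIA2.
+	 */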
sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, aad_buffer_len);
TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
@@ -2327,10 +1388,8 @@ create_snow3g_hash_operation(const uint8_t *auth_tag,
memset(sym_op->auth.aad.data, 0, aad_buffer_len);
rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "aad:",
+ TEST_HEXDUMP(stdout, "aad:",
sym_op->auth.aad.data, aad_len);
-#endif
/* digest */
sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
@@ -2347,11 +1406,9 @@ create_snow3g_hash_operation(const uint8_t *auth_tag,
else
rte_memcpy(sym_op->auth.digest.data, auth_tag, auth_tag_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "digest:",
+ TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
-#endif
sym_op->auth.data.length = auth_len;
sym_op->auth.data.offset = auth_offset;
@@ -2360,11 +1417,13 @@ create_snow3g_hash_operation(const uint8_t *auth_tag,
}
static int
-create_snow3g_cipher_hash_operation(const uint8_t *auth_tag,
+create_snow3g_kasumi_cipher_hash_operation(const uint8_t *auth_tag,
const unsigned auth_tag_len,
const uint8_t *aad, const uint8_t aad_len,
unsigned data_pad_len,
enum rte_crypto_auth_operation op,
+ enum rte_crypto_auth_algorithm auth_algo,
+ enum rte_crypto_cipher_algorithm cipher_algo,
const uint8_t *iv, const uint8_t iv_len,
const unsigned cipher_len, const unsigned cipher_offset,
const unsigned auth_len, const unsigned auth_offset)
@@ -2390,7 +1449,10 @@ create_snow3g_cipher_hash_operation(const uint8_t *auth_tag,
/* iv */
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+ if (cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
+ else
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, iv_pad_len);
@@ -2411,9 +1473,12 @@ create_snow3g_cipher_hash_operation(const uint8_t *auth_tag,
* The cryptodev API calls out -
* - the array must be big enough to hold the AAD, plus any
* space to round this up to the nearest multiple of the
- * block size (16 bytes).
+ * block size (8 bytes for KASUMI and 16 bytes for SNOW3G).
*/
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+ if (auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9)
+ aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 8);
+ else
+ aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
sym_op->auth.aad.data =
(uint8_t *)rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *);
@@ -2426,10 +1491,8 @@ create_snow3g_cipher_hash_operation(const uint8_t *auth_tag,
memset(sym_op->auth.aad.data, 0, aad_buffer_len);
rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "aad:",
+ TEST_HEXDUMP(stdout, "aad:",
sym_op->auth.aad.data, aad_len);
-#endif
/* digest */
sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
@@ -2446,11 +1509,9 @@ create_snow3g_cipher_hash_operation(const uint8_t *auth_tag,
else
rte_memcpy(sym_op->auth.digest.data, auth_tag, auth_tag_len);
- #ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "digest:",
+ TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
- #endif
sym_op->auth.data.length = auth_len;
sym_op->auth.data.offset = auth_offset;
@@ -2459,12 +1520,14 @@ create_snow3g_cipher_hash_operation(const uint8_t *auth_tag,
}
static int
-create_snow3g_auth_cipher_operation(const unsigned auth_tag_len,
+create_snow3g_kasumi_auth_cipher_operation(const unsigned auth_tag_len,
const uint8_t *iv, const uint8_t iv_len,
const uint8_t *aad, const uint8_t aad_len,
unsigned data_pad_len,
const unsigned cipher_len, const unsigned cipher_offset,
- const unsigned auth_len, const unsigned auth_offset)
+ const unsigned auth_len, const unsigned auth_offset,
+ enum rte_crypto_auth_algorithm auth_algo,
+ enum rte_crypto_cipher_algorithm cipher_algo)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -2499,13 +1562,15 @@ create_snow3g_auth_cipher_operation(const unsigned auth_tag_len,
memset(sym_op->auth.digest.data, 0, auth_tag_len);
- #ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "digest:",
+ TEST_HEXDUMP(stdout, "digest:",
sym_op->auth.digest.data,
sym_op->auth.digest.length);
- #endif
+
/* iv */
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+ if (cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
+ else
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, iv_pad_len);
@@ -2523,9 +1588,12 @@ create_snow3g_auth_cipher_operation(const unsigned auth_tag_len,
* The cryptodev API calls out -
* - the array must be big enough to hold the AAD, plus any
* space to round this up to the nearest multiple of the
- * block size (16 bytes).
+	 * block size (8 bytes for KASUMI and 16 bytes for SNOW3G).
*/
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+ if (auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9)
+ aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 8);
+ else
+ aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, aad_buffer_len);
@@ -2538,10 +1606,8 @@ create_snow3g_auth_cipher_operation(const unsigned auth_tag_len,
memset(sym_op->auth.aad.data, 0, aad_buffer_len);
rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "aad:",
+ TEST_HEXDUMP(stdout, "aad:",
sym_op->auth.aad.data, aad_len);
-#endif
sym_op->cipher.data.length = cipher_len;
sym_op->cipher.data.offset = auth_offset + cipher_offset;
@@ -2560,13 +1626,15 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
int retval;
unsigned plaintext_pad_len;
+ unsigned plaintext_len;
uint8_t *plaintext;
/* Create SNOW3G session */
- retval = create_snow3g_hash_session(ts_params->valid_devs[0],
+ retval = create_snow3g_kasumi_hash_session(ts_params->valid_devs[0],
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->digest.len,
- RTE_CRYPTO_AUTH_OP_GENERATE);
+ RTE_CRYPTO_AUTH_OP_GENERATE,
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2);
if (retval < 0)
return retval;
@@ -2576,17 +1644,19 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
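+	/* Test vector lengths are given in bits; ceil_byte_length()
+	 * rounds them up to whole bytes, effectively:
+	 *	bytes = (num_bits >> 3) + ((num_bits % 8) ? 1 : 0);
+	 */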
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
/* Append data which is padded to a multiple of */
	/* the algorithm's block size */
- plaintext_pad_len = tdata->plaintext.len >> 3;
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
plaintext_pad_len);
- memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len >> 3);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
- /* Create SNOW3G opertaion */
- retval = create_snow3g_hash_operation(NULL, tdata->digest.len,
+ /* Create SNOW3G operation */
+ retval = create_snow3g_kasumi_hash_operation(NULL, tdata->digest.len,
tdata->aad.data, tdata->aad.len,
plaintext_pad_len, RTE_CRYPTO_AUTH_OP_GENERATE,
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
tdata->validAuthLenInBits.len,
tdata->validAuthOffsetLenInBits.len);
if (retval < 0)
@@ -2617,13 +1687,15 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
int retval;
unsigned plaintext_pad_len;
+ unsigned plaintext_len;
uint8_t *plaintext;
/* Create SNOW3G session */
- retval = create_snow3g_hash_session(ts_params->valid_devs[0],
+ retval = create_snow3g_kasumi_hash_session(ts_params->valid_devs[0],
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->digest.len,
- RTE_CRYPTO_AUTH_OP_VERIFY);
+ RTE_CRYPTO_AUTH_OP_VERIFY,
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2);
if (retval < 0)
return retval;
/* alloc mbuf and set payload */
@@ -2632,19 +1704,21 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /* Append data which is padded to a multiple */
- /* of the algorithms block size */
- plaintext_pad_len = tdata->plaintext.len >> 3;
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple of */
+	/* the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- plaintext_pad_len);
- memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len >> 3);
+ plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
/* Create SNOW3G operation */
- retval = create_snow3g_hash_operation(tdata->digest.data,
+ retval = create_snow3g_kasumi_hash_operation(tdata->digest.data,
tdata->digest.len,
tdata->aad.data, tdata->aad.len,
plaintext_pad_len,
RTE_CRYPTO_AUTH_OP_VERIFY,
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
tdata->validAuthLenInBits.len,
tdata->validAuthOffsetLenInBits.len);
if (retval < 0)
@@ -2666,6 +1740,127 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
return 0;
}
+static int
+test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ unsigned plaintext_pad_len;
+ unsigned plaintext_len;
+ uint8_t *plaintext;
+
+ /* Create KASUMI session */
+ retval = create_snow3g_kasumi_hash_session(ts_params->valid_devs[0],
+ tdata->key.data, tdata->key.len,
+ tdata->aad.len, tdata->digest.len,
+ RTE_CRYPTO_AUTH_OP_GENERATE,
+ RTE_CRYPTO_AUTH_KASUMI_F9);
+ if (retval < 0)
+ return retval;
+
+ /* alloc mbuf and set payload */
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple of */
+	/* the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 8);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+ /* Create KASUMI operation */
+ retval = create_snow3g_kasumi_hash_operation(NULL, tdata->digest.len,
+ tdata->aad.data, tdata->aad.len,
+ plaintext_pad_len, RTE_CRYPTO_AUTH_OP_GENERATE,
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ tdata->validAuthLenInBits.len,
+ tdata->validAuthOffsetLenInBits.len);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+	TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+	ut_params->obuf = ut_params->op->sym->m_src;
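+	/* The digest sits after the padded AAD and the padded plaintext
+	 * in the mbuf, hence the offset arithmetic below.
+	 */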
+ ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + plaintext_pad_len + ALIGN_POW2_ROUNDUP(tdata->aad.len, 8);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ ut_params->digest,
+ tdata->digest.data,
+ DIGEST_BYTE_LENGTH_KASUMI_F9,
+ "KASUMI Generated auth tag not as expected");
+
+ return 0;
+}
+
+static int
+test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ unsigned plaintext_pad_len;
+ unsigned plaintext_len;
+ uint8_t *plaintext;
+
+ /* Create KASUMI session */
+ retval = create_snow3g_kasumi_hash_session(ts_params->valid_devs[0],
+ tdata->key.data, tdata->key.len,
+ tdata->aad.len, tdata->digest.len,
+ RTE_CRYPTO_AUTH_OP_VERIFY,
+ RTE_CRYPTO_AUTH_KASUMI_F9);
+ if (retval < 0)
+ return retval;
+ /* alloc mbuf and set payload */
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple */
+	/* of the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 8);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+ /* Create KASUMI operation */
+ retval = create_snow3g_kasumi_hash_operation(tdata->digest.data,
+ tdata->digest.len,
+ tdata->aad.data, tdata->aad.len,
+ plaintext_pad_len,
+ RTE_CRYPTO_AUTH_OP_VERIFY,
+ RTE_CRYPTO_AUTH_KASUMI_F9,
+ tdata->validAuthLenInBits.len,
+ tdata->validAuthOffsetLenInBits.len);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
+ ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + plaintext_pad_len + tdata->aad.len;
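+	/* The digest pointer is not compared directly here; for verify
+	 * operations the PMD reports the outcome through op->status.
+	 */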
+
+ /* Validate obuf */
+ if (ut_params->op->status == RTE_CRYPTO_OP_STATUS_SUCCESS)
+ return 0;
+ else
+ return -1;
+}
static int
test_snow3g_hash_generate_test_case_1(void)
@@ -2686,6 +1881,24 @@ test_snow3g_hash_generate_test_case_3(void)
}
static int
+test_snow3g_hash_generate_test_case_4(void)
+{
+ return test_snow3g_authentication(&snow3g_hash_test_case_4);
+}
+
+static int
+test_snow3g_hash_generate_test_case_5(void)
+{
+ return test_snow3g_authentication(&snow3g_hash_test_case_5);
+}
+
+static int
+test_snow3g_hash_generate_test_case_6(void)
+{
+ return test_snow3g_authentication(&snow3g_hash_test_case_6);
+}
+
+static int
test_snow3g_hash_verify_test_case_1(void)
{
return test_snow3g_authentication_verify(&snow3g_hash_test_case_1);
@@ -2705,6 +1918,351 @@ test_snow3g_hash_verify_test_case_3(void)
}
static int
+test_snow3g_hash_verify_test_case_4(void)
+{
+ return test_snow3g_authentication_verify(&snow3g_hash_test_case_4);
+}
+
+static int
+test_snow3g_hash_verify_test_case_5(void)
+{
+ return test_snow3g_authentication_verify(&snow3g_hash_test_case_5);
+}
+
+static int
+test_snow3g_hash_verify_test_case_6(void)
+{
+ return test_snow3g_authentication_verify(&snow3g_hash_test_case_6);
+}
+
+static int
+test_kasumi_hash_generate_test_case_1(void)
+{
+ return test_kasumi_authentication(&kasumi_hash_test_case_1);
+}
+
+static int
+test_kasumi_hash_generate_test_case_2(void)
+{
+ return test_kasumi_authentication(&kasumi_hash_test_case_2);
+}
+
+static int
+test_kasumi_hash_generate_test_case_3(void)
+{
+ return test_kasumi_authentication(&kasumi_hash_test_case_3);
+}
+
+static int
+test_kasumi_hash_generate_test_case_4(void)
+{
+ return test_kasumi_authentication(&kasumi_hash_test_case_4);
+}
+
+static int
+test_kasumi_hash_generate_test_case_5(void)
+{
+ return test_kasumi_authentication(&kasumi_hash_test_case_5);
+}
+
+static int
+test_kasumi_hash_verify_test_case_1(void)
+{
+ return test_kasumi_authentication_verify(&kasumi_hash_test_case_1);
+}
+
+static int
+test_kasumi_hash_verify_test_case_2(void)
+{
+ return test_kasumi_authentication_verify(&kasumi_hash_test_case_2);
+}
+
+static int
+test_kasumi_hash_verify_test_case_3(void)
+{
+ return test_kasumi_authentication_verify(&kasumi_hash_test_case_3);
+}
+
+static int
+test_kasumi_hash_verify_test_case_4(void)
+{
+ return test_kasumi_authentication_verify(&kasumi_hash_test_case_4);
+}
+
+static int
+test_kasumi_hash_verify_test_case_5(void)
+{
+ return test_kasumi_authentication_verify(&kasumi_hash_test_case_5);
+}
+
+static int
+test_kasumi_encryption(const struct kasumi_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ uint8_t *plaintext, *ciphertext;
+ unsigned plaintext_pad_len;
+ unsigned plaintext_len;
+
+ /* Create KASUMI session */
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ /* Clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple */
+	/* of the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 8);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len);
+
+ /* Create KASUMI operation */
+ retval = create_snow3g_kasumi_cipher_operation(tdata->iv.data, tdata->iv.len,
+ tdata->plaintext.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_KASUMI_F8);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
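+	/* m_dst is set only for out-of-place operations; for in-place
+	 * processing the result stays in m_src.
+	 */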
+ ut_params->obuf = ut_params->op->sym->m_dst;
+ if (ut_params->obuf)
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + tdata->iv.len;
+ else
+ ciphertext = plaintext;
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
+
+ /* Validate obuf */
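+	/* The comparison is made at bit granularity, since
+	 * validCipherLenInBits need not be byte aligned.
+	 */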
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->validCipherLenInBits.len,
+ "KASUMI Ciphertext data not as expected");
+ return 0;
+}
+
+static int
+test_kasumi_encryption_oop(const struct kasumi_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ uint8_t *plaintext, *ciphertext;
+ unsigned plaintext_pad_len;
+ unsigned plaintext_len;
+
+ /* Create KASUMI session */
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ /* Clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple */
+	/* of the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 8);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
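+	/* Out-of-place: reserve the same amount of data space in the
+	 * destination mbuf as in the source.
+	 */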
+ rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len);
+
+ /* Create KASUMI operation */
+ retval = create_snow3g_kasumi_cipher_operation_oop(tdata->iv.data,
+ tdata->iv.len,
+ tdata->plaintext.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_KASUMI_F8);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
+ ut_params->obuf = ut_params->op->sym->m_dst;
+ if (ut_params->obuf)
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + tdata->iv.len;
+ else
+ ciphertext = plaintext;
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->validCipherLenInBits.len,
+ "KASUMI Ciphertext data not as expected");
+ return 0;
+}
+
+static int
+test_kasumi_decryption_oop(const struct kasumi_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ uint8_t *ciphertext, *plaintext;
+ unsigned ciphertext_pad_len;
+ unsigned ciphertext_len;
+
+ /* Create KASUMI session */
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ /* Clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ ciphertext_len = ceil_byte_length(tdata->ciphertext.len);
+ /* Append data which is padded to a multiple */
+	/* of the algorithm's block size */
+ ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 8);
+ ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ ciphertext_pad_len);
+ rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len);
+ memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, ciphertext_len);
+
+ /* Create KASUMI operation */
+ retval = create_snow3g_kasumi_cipher_operation_oop(tdata->iv.data,
+ tdata->iv.len,
+ tdata->ciphertext.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_KASUMI_F8);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
+ ut_params->obuf = ut_params->op->sym->m_dst;
+ if (ut_params->obuf)
+ plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + tdata->iv.len;
+ else
+ plaintext = ciphertext;
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, ciphertext_len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ plaintext,
+ tdata->plaintext.data,
+ tdata->validCipherLenInBits.len,
+ "KASUMI Plaintext data not as expected");
+ return 0;
+}
+
+static int
+test_kasumi_decryption(const struct kasumi_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ uint8_t *ciphertext, *plaintext;
+ unsigned ciphertext_pad_len;
+ unsigned ciphertext_len;
+
+ /* Create KASUMI session */
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ /* Clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ ciphertext_len = ceil_byte_length(tdata->ciphertext.len);
+ /* Append data which is padded to a multiple */
+	/* of the algorithm's block size */
+ ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 8);
+ ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ ciphertext_pad_len);
+ memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, ciphertext_len);
+
+ /* Create KASUMI operation */
+ retval = create_snow3g_kasumi_cipher_operation(tdata->iv.data,
+ tdata->iv.len,
+ tdata->ciphertext.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_KASUMI_F8);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
+ ut_params->obuf = ut_params->op->sym->m_dst;
+ if (ut_params->obuf)
+ plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + tdata->iv.len;
+ else
+ plaintext = ciphertext;
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, ciphertext_len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ plaintext,
+ tdata->plaintext.data,
+ tdata->validCipherLenInBits.len,
+ "KASUMI Plaintext data not as expected");
+ return 0;
+}
+
+static int
test_snow3g_encryption(const struct snow3g_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -2712,13 +2270,13 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
int retval;
uint8_t *plaintext, *ciphertext;
- uint8_t plaintext_pad_len;
- uint8_t lastByteValidBits = 8;
- uint8_t lastByteMask = 0xFF;
+ unsigned plaintext_pad_len;
+ unsigned plaintext_len;
/* Create SNOW3G session */
- retval = create_snow3g_cipher_session(ts_params->valid_devs[0],
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->key.data, tdata->key.len);
if (retval < 0)
return retval;
@@ -2729,24 +2287,21 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /*
- * Append data which is padded to a
- * multiple of the algorithms block size
- */
- /*tdata->plaintext.len = tdata->plaintext.len >> 3;*/
- plaintext_pad_len = RTE_ALIGN_CEIL((tdata->plaintext.len >> 3), 16);
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple of */
+	/* the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
- plaintext = (uint8_t *) rte_pktmbuf_append(ut_params->ibuf,
- plaintext_pad_len);
- memcpy(plaintext, tdata->plaintext.data, (tdata->plaintext.len >> 3));
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
-#endif
/* Create SNOW3G operation */
- retval = create_snow3g_cipher_operation(tdata->iv.data, tdata->iv.len,
+ retval = create_snow3g_kasumi_cipher_operation(tdata->iv.data, tdata->iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len);
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
if (retval < 0)
return retval;
@@ -2761,20 +2316,13 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
else
ciphertext = plaintext;
- lastByteValidBits = (tdata->validDataLenInBits.len % 8);
- if (lastByteValidBits == 0)
- lastByteValidBits = 8;
- lastByteMask = lastByteMask << (8 - lastByteValidBits);
- (*(ciphertext + (tdata->ciphertext.len >> 3) - 1)) &= lastByteMask;
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#endif
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
ciphertext,
tdata->ciphertext.data,
- tdata->ciphertext.len >> 3,
+ tdata->validDataLenInBits.len,
"Snow3G Ciphertext data not as expected");
return 0;
}
@@ -2788,13 +2336,107 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
uint8_t *plaintext, *ciphertext;
int retval;
- uint8_t plaintext_pad_len;
- uint8_t lastByteValidBits = 8;
- uint8_t lastByteMask = 0xFF;
+ unsigned plaintext_pad_len;
+ unsigned plaintext_len;
+
+ /* Create SNOW3G session */
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+ "Failed to allocate input buffer in mempool");
+ TEST_ASSERT_NOT_NULL(ut_params->obuf,
+ "Failed to allocate output buffer in mempool");
+
+ /* Clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple of
+  * the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len);
+
+ /* Create SNOW3G operation */
+ retval = create_snow3g_kasumi_cipher_operation_oop(tdata->iv.data,
+ tdata->iv.len,
+ tdata->validCipherLenInBits.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
+ ut_params->obuf = ut_params->op->sym->m_dst;
+ if (ut_params->obuf)
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + tdata->iv.len;
+ else
+ ciphertext = plaintext;
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->validDataLenInBits.len,
+ "Snow3G Ciphertext data not as expected");
+ return 0;
+}
+
+/* Shift right a buffer by "offset" bits, "offset" < 8 */
+static void
+buffer_shift_right(uint8_t *buffer, uint32_t length, uint8_t offset)
+{
+ uint8_t curr_byte, prev_byte;
+ uint32_t length_in_bytes = ceil_byte_length(length + offset);
+ uint8_t lower_byte_mask = (1 << offset) - 1;
+ unsigned i;
+
+ prev_byte = buffer[0];
+ buffer[0] >>= offset;
+
+ for (i = 1; i < length_in_bytes; i++) {
+ curr_byte = buffer[i];
+ buffer[i] = ((prev_byte & lower_byte_mask) << (8 - offset)) |
+ (curr_byte >> offset);
+ prev_byte = curr_byte;
+ }
+}
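+
+/*
+ * Worked example (illustration only, not used by any test case):
+ * shifting the 16-bit buffer { 0xAB, 0xCD } right by 4 bits touches
+ * ceil_byte_length(16 + 4) = 3 bytes, so with a zeroed trailing byte
+ *
+ *	uint8_t buf[3] = { 0xAB, 0xCD, 0x00 };
+ *	buffer_shift_right(buf, 16, 4);
+ *
+ * leaves buf holding { 0x0A, 0xBC, 0xD0 }.
+ */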
+
+static int
+test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+ uint8_t *plaintext, *ciphertext;
+ int retval;
+ uint32_t plaintext_len;
+ uint32_t plaintext_pad_len;
+ uint8_t extra_offset = 4;
+ uint8_t *expected_ciphertext_shifted;
/* Create SNOW3G session */
- retval = create_snow3g_cipher_session(ts_params->valid_devs[0],
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->key.data, tdata->key.len);
if (retval < 0)
return retval;
@@ -2811,29 +2453,31 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
+ plaintext_len = ceil_byte_length(tdata->plaintext.len + extra_offset);
/*
* Append data which is padded to a
* multiple of the algorithms block size
*/
- /*tdata->plaintext.len = tdata->plaintext.len >> 3;*/
- plaintext_pad_len = RTE_ALIGN_CEIL((tdata->plaintext.len >> 3), 16);
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
plaintext = (uint8_t *) rte_pktmbuf_append(ut_params->ibuf,
plaintext_pad_len);
- rte_pktmbuf_append(ut_params->obuf,
- plaintext_pad_len);
+ rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
memcpy(plaintext, tdata->plaintext.data, (tdata->plaintext.len >> 3));
+ buffer_shift_right(plaintext, tdata->plaintext.len, extra_offset);
#ifdef RTE_APP_TEST_DEBUG
rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
#endif
/* Create SNOW3G operation */
- retval = create_snow3g_cipher_operation_oop(tdata->iv.data,
+ retval = create_snow3g_kasumi_cipher_operation_oop(tdata->iv.data,
tdata->iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len);
+ tdata->validCipherOffsetLenInBits.len +
+ extra_offset,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
if (retval < 0)
return retval;
@@ -2848,25 +2492,30 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
else
ciphertext = plaintext;
- lastByteValidBits = (tdata->validDataLenInBits.len % 8);
- if (lastByteValidBits == 0)
- lastByteValidBits = 8;
- lastByteMask = lastByteMask << (8 - lastByteValidBits);
- (*(ciphertext + (tdata->ciphertext.len >> 3) - 1)) &= lastByteMask;
-
#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+ rte_hexdump(stdout, "ciphertext:", ciphertext, plaintext_len);
#endif
+
+ expected_ciphertext_shifted = rte_malloc(NULL,
+ ceil_byte_length(plaintext_len + extra_offset), 0);
+
+ TEST_ASSERT_NOT_NULL(expected_ciphertext_shifted,
+ "failed to reserve memory for ciphertext shifted\n");
+
+ memcpy(expected_ciphertext_shifted, tdata->ciphertext.data,
+ ceil_byte_length(tdata->ciphertext.len));
+ buffer_shift_right(expected_ciphertext_shifted, tdata->ciphertext.len,
+ extra_offset);
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT_OFFSET(
ciphertext,
- tdata->ciphertext.data,
- tdata->ciphertext.len >> 3,
+ expected_ciphertext_shifted,
+ tdata->validDataLenInBits.len,
+ extra_offset,
"Snow3G Ciphertext data not as expected");
return 0;
}
-
static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -2875,13 +2524,13 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
int retval;
uint8_t *plaintext, *ciphertext;
- uint8_t ciphertext_pad_len;
- uint8_t lastByteValidBits = 8;
- uint8_t lastByteMask = 0xFF;
+ unsigned ciphertext_pad_len;
+ unsigned ciphertext_len;
/* Create SNOW3G session */
- retval = create_snow3g_cipher_session(ts_params->valid_devs[0],
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->key.data, tdata->key.len);
if (retval < 0)
return retval;
@@ -2892,48 +2541,40 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /*
- * Append data which is padded to a
- * multiple of the algorithms block size
- */
- ciphertext_pad_len = RTE_ALIGN_CEIL((tdata->ciphertext.len >> 3), 16);
+ ciphertext_len = ceil_byte_length(tdata->ciphertext.len);
+ /* Append data which is padded to a multiple of
+  * the algorithm's block size */
+ ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
+ ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ ciphertext_pad_len);
+ memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
- ciphertext = (uint8_t *) rte_pktmbuf_append(ut_params->ibuf,
- ciphertext_pad_len);
- memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len >> 3);
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#endif
/* Create SNOW3G operation */
- retval = create_snow3g_cipher_operation(tdata->iv.data, tdata->iv.len,
+ retval = create_snow3g_kasumi_cipher_operation(tdata->iv.data, tdata->iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len);
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
if (retval < 0)
return retval;
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
- ut_params->obuf = ut_params->op->sym->m_src;
+ ut_params->obuf = ut_params->op->sym->m_dst;
if (ut_params->obuf)
plaintext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ tdata->iv.len;
else
plaintext = ciphertext;
- lastByteValidBits = (tdata->validDataLenInBits.len % 8);
- if (lastByteValidBits == 0)
- lastByteValidBits = 8;
- lastByteMask = lastByteMask << (8 - lastByteValidBits);
- (*(ciphertext + (tdata->ciphertext.len >> 3) - 1)) &= lastByteMask;
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
-#endif
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len);
+
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(plaintext,
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(plaintext,
tdata->plaintext.data,
- tdata->plaintext.len >> 3,
+ tdata->validDataLenInBits.len,
"Snow3G Plaintext data not as expected");
return 0;
}
@@ -2946,13 +2587,13 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
int retval;
uint8_t *plaintext, *ciphertext;
- uint8_t ciphertext_pad_len;
- uint8_t lastByteValidBits = 8;
- uint8_t lastByteMask = 0xFF;
+ unsigned ciphertext_pad_len;
+ unsigned ciphertext_len;
/* Create SNOW3G session */
- retval = create_snow3g_cipher_session(ts_params->valid_devs[0],
+ retval = create_snow3g_kasumi_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->key.data, tdata->key.len);
if (retval < 0)
return retval;
@@ -2972,28 +2613,23 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->obuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->obuf));
- /*
- * Append data which is padded to a
- * multiple of the algorithms block size
- */
- ciphertext_pad_len = RTE_ALIGN_CEIL((tdata->ciphertext.len >> 3), 16);
-
- ciphertext = (uint8_t *) rte_pktmbuf_append(ut_params->ibuf,
- ciphertext_pad_len);
-
- rte_pktmbuf_append(ut_params->obuf,
- ciphertext_pad_len);
+ ciphertext_len = ceil_byte_length(tdata->ciphertext.len);
+ /* Append data which is padded to a multiple of
+  * the algorithm's block size */
+ ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
+ ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ ciphertext_pad_len);
+ rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len);
+ memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
- memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len >> 3);
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#endif
/* Create SNOW3G operation */
- retval = create_snow3g_cipher_operation_oop(tdata->iv.data,
+ retval = create_snow3g_kasumi_cipher_operation_oop(tdata->iv.data,
tdata->iv.len,
tdata->validCipherLenInBits.len,
- tdata->validCipherOffsetLenInBits.len);
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
if (retval < 0)
return retval;
@@ -3006,19 +2642,13 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
+ tdata->iv.len;
else
plaintext = ciphertext;
- lastByteValidBits = (tdata->validDataLenInBits.len % 8);
- if (lastByteValidBits == 0)
- lastByteValidBits = 8;
- lastByteMask = lastByteMask << (8 - lastByteValidBits);
- (*(plaintext + (tdata->ciphertext.len >> 3) - 1)) &= lastByteMask;
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
-#endif
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len);
+
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(plaintext,
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(plaintext,
tdata->plaintext.data,
- tdata->plaintext.len >> 3,
+ tdata->validDataLenInBits.len,
"Snow3G Plaintext data not as expected");
return 0;
}
@@ -3032,14 +2662,15 @@ test_snow3g_authenticated_encryption(const struct snow3g_test_data *tdata)
int retval;
uint8_t *plaintext, *ciphertext;
- uint8_t plaintext_pad_len;
- uint8_t lastByteValidBits = 8;
- uint8_t lastByteMask = 0xFF;
+ unsigned plaintext_pad_len;
+ unsigned plaintext_len;
/* Create SNOW3G session */
- retval = create_snow3g_cipher_auth_session(ts_params->valid_devs[0],
+ retval = create_snow3g_kasumi_cipher_auth_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_AUTH_OP_GENERATE,
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->digest.len);
if (retval < 0)
@@ -3050,28 +2681,29 @@ test_snow3g_authenticated_encryption(const struct snow3g_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /* Append data which is padded to a multiple */
- /* of the algorithms block size */
- plaintext_pad_len = tdata->plaintext.len >> 3;
-
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple of
+  * the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- plaintext_pad_len);
- memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len >> 3);
+ plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
-#endif
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len);
/* Create SNOW3G operation */
- retval = create_snow3g_cipher_hash_operation(tdata->digest.data,
+ retval = create_snow3g_kasumi_cipher_hash_operation(tdata->digest.data,
tdata->digest.len, tdata->aad.data,
tdata->aad.len, /*tdata->plaintext.len,*/
plaintext_pad_len, RTE_CRYPTO_AUTH_OP_GENERATE,
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->iv.data, tdata->iv.len,
tdata->validCipherLenInBits.len,
tdata->validCipherOffsetLenInBits.len,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len);
+ tdata->validAuthOffsetLenInBits.len);
if (retval < 0)
return retval;
@@ -3084,20 +2716,14 @@ test_snow3g_authenticated_encryption(const struct snow3g_test_data *tdata)
+ tdata->iv.len;
else
ciphertext = plaintext;
- lastByteValidBits = (tdata->validDataLenInBits.len % 8);
- if (lastByteValidBits == 0)
- lastByteValidBits = 8;
- lastByteMask = lastByteMask << (8-lastByteValidBits);
- (*(ciphertext + (tdata->ciphertext.len >> 3) - 1)) &= lastByteMask;
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#endif
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
ciphertext,
tdata->ciphertext.data,
- tdata->ciphertext.len >> 3,
+ tdata->validDataLenInBits.len,
"Snow3G Ciphertext data not as expected");
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
@@ -3120,14 +2746,15 @@ test_snow3g_encrypted_authentication(const struct snow3g_test_data *tdata)
int retval;
uint8_t *plaintext, *ciphertext;
- uint8_t plaintext_pad_len;
- uint8_t lastByteValidBits = 8;
- uint8_t lastByteMask = 0xFF;
+ unsigned plaintext_pad_len;
+ unsigned plaintext_len;
/* Create SNOW3G session */
- retval = create_snow3g_auth_cipher_session(ts_params->valid_devs[0],
+ retval = create_snow3g_kasumi_auth_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
RTE_CRYPTO_AUTH_OP_GENERATE,
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
tdata->key.data, tdata->key.len,
tdata->aad.len, tdata->digest.len);
if (retval < 0)
@@ -3139,20 +2766,18 @@ test_snow3g_encrypted_authentication(const struct snow3g_test_data *tdata)
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /* Append data which is padded to a multiple */
- /* of the algorithms block size */
- plaintext_pad_len = RTE_ALIGN_CEIL((tdata->plaintext.len >> 3), 8);
-
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple of
+  * the algorithm's block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- plaintext_pad_len);
- memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len >> 3);
+ plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
-#endif
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len);
/* Create SNOW3G operation */
- retval = create_snow3g_auth_cipher_operation(
+ retval = create_snow3g_kasumi_auth_cipher_operation(
tdata->digest.len,
tdata->iv.data, tdata->iv.len,
tdata->aad.data, tdata->aad.len,
@@ -3160,7 +2785,9 @@ test_snow3g_encrypted_authentication(const struct snow3g_test_data *tdata)
tdata->validCipherLenInBits.len,
tdata->validCipherOffsetLenInBits.len,
tdata->validAuthLenInBits.len,
- tdata->validAuthOffsetLenInBits.len
+ tdata->validAuthOffsetLenInBits.len,
+ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2
);
if (retval < 0)
@@ -3176,21 +2803,15 @@ test_snow3g_encrypted_authentication(const struct snow3g_test_data *tdata)
else
ciphertext = plaintext;
- lastByteValidBits = (tdata->validDataLenInBits.len % 8);
- if (lastByteValidBits == 0)
- lastByteValidBits = 8;
- lastByteMask = lastByteMask << (8-lastByteValidBits);
- (*(ciphertext + (tdata->ciphertext.len >> 3) - 1)) &= lastByteMask;
ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ plaintext_pad_len + tdata->aad.len + tdata->iv.len;
- #ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#endif
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
ciphertext,
tdata->ciphertext.data,
- tdata->ciphertext.len >> 3,
+ tdata->validDataLenInBits.len,
"Snow3G Ciphertext data not as expected");
/* Validate obuf */
@@ -3203,6 +2824,77 @@ test_snow3g_encrypted_authentication(const struct snow3g_test_data *tdata)
}
static int
+test_kasumi_encryption_test_case_1(void)
+{
+ return test_kasumi_encryption(&kasumi_test_case_1);
+}
+
+static int
+test_kasumi_encryption_test_case_1_oop(void)
+{
+ return test_kasumi_encryption_oop(&kasumi_test_case_1);
+}
+
+static int
+test_kasumi_encryption_test_case_2(void)
+{
+ return test_kasumi_encryption(&kasumi_test_case_2);
+}
+
+static int
+test_kasumi_encryption_test_case_3(void)
+{
+ return test_kasumi_encryption(&kasumi_test_case_3);
+}
+
+static int
+test_kasumi_encryption_test_case_4(void)
+{
+ return test_kasumi_encryption(&kasumi_test_case_4);
+}
+
+static int
+test_kasumi_encryption_test_case_5(void)
+{
+ return test_kasumi_encryption(&kasumi_test_case_5);
+}
+
+static int
+test_kasumi_decryption_test_case_1(void)
+{
+ return test_kasumi_decryption(&kasumi_test_case_1);
+}
+
+static int
+test_kasumi_decryption_test_case_1_oop(void)
+{
+ return test_kasumi_decryption_oop(&kasumi_test_case_1);
+}
+
+static int
+test_kasumi_decryption_test_case_2(void)
+{
+ return test_kasumi_decryption(&kasumi_test_case_2);
+}
+
+static int
+test_kasumi_decryption_test_case_3(void)
+{
+ return test_kasumi_decryption(&kasumi_test_case_3);
+}
+
+static int
+test_kasumi_decryption_test_case_4(void)
+{
+ return test_kasumi_decryption(&kasumi_test_case_4);
+}
+
+static int
+test_kasumi_decryption_test_case_5(void)
+{
+ return test_kasumi_decryption(&kasumi_test_case_5);
+}
+
+static int
test_snow3g_encryption_test_case_1(void)
{
return test_snow3g_encryption(&snow3g_test_case_1);
@@ -3215,6 +2907,12 @@ test_snow3g_encryption_test_case_1_oop(void)
}
static int
+test_snow3g_encryption_test_case_1_offset_oop(void)
+{
+ return test_snow3g_encryption_offset_oop(&snow3g_test_case_1);
+}
+
+static int
test_snow3g_encryption_test_case_2(void)
{
return test_snow3g_encryption(&snow3g_test_case_2);
@@ -3308,9 +3006,8 @@ create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op,
ut_params->cipher_xform.cipher.key.data = cipher_key;
ut_params->cipher_xform.cipher.key.length = key_len;
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "key:", key, key_len);
-#endif
+ TEST_HEXDUMP(stdout, "key:", key, key_len);
+
/* Setup Authentication Parameters */
ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -3371,11 +3068,9 @@ create_gcm_operation(enum rte_crypto_cipher_operation op,
if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
rte_memcpy(sym_op->auth.digest.data, auth_tag, auth_tag_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "digest:",
- ut_params->op->digest.data,
- ut_params->op->digest.length);
-#endif
+ TEST_HEXDUMP(stdout, "digest:",
+ sym_op->auth.digest.data,
+ sym_op->auth.digest.length);
}
/* iv */
@@ -3415,11 +3110,10 @@ create_gcm_operation(enum rte_crypto_cipher_operation op,
memset(sym_op->auth.aad.data, 0, aad_buffer_len);
rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "iv:", ut_params->op->iv.data, iv_pad_len);
- rte_hexdump(stdout, "aad:",
- ut_params->op->additional_auth.data, aad_len);
-#endif
+ TEST_HEXDUMP(stdout, "iv:", sym_op->cipher.iv.data, iv_pad_len);
+ TEST_HEXDUMP(stdout, "aad:",
+ sym_op->auth.aad.data, aad_len);
+
sym_op->cipher.data.length = data_len;
sym_op->cipher.data.offset = aad_buffer_len + iv_pad_len;
@@ -3465,9 +3159,8 @@ test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
plaintext_pad_len);
memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "plaintext:", plaintext, tdata->plaintext.len);
-#endif
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len);
+
/* Create GCM operation */
retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT,
tdata->auth_tag.data, tdata->auth_tag.len,
@@ -3498,10 +3191,9 @@ test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
auth_tag = plaintext + plaintext_pad_len;
}
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
- rte_hexdump(stdout, "auth tag:", auth_tag, tdata->auth_tag.len);
-#endif
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+ TEST_HEXDUMP(stdout, "auth tag:", auth_tag, tdata->auth_tag.len);
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
ciphertext,
@@ -3593,9 +3285,8 @@ test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
ciphertext_pad_len);
memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-#endif
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+
/* Create GCM operation */
retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT,
tdata->auth_tag.data, tdata->auth_tag.len,
@@ -3623,9 +3314,8 @@ test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
else
plaintext = ciphertext;
-#ifdef RTE_APP_TEST_DEBUG
- rte_hexdump(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
-#endif
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
plaintext,
@@ -3781,12 +3471,19 @@ test_multi_session(void)
/*
* free mbuf - both obuf and ibuf are usually the same,
- * but rte copes even if we call free twice
+ * so checking whether they point at the same address is
+ * necessary to avoid freeing the mbuf twice.
*/
if (ut_params->obuf) {
rte_pktmbuf_free(ut_params->obuf);
+ if (ut_params->ibuf == ut_params->obuf)
+ ut_params->ibuf = 0;
ut_params->obuf = 0;
}
+ if (ut_params->ibuf) {
+ rte_pktmbuf_free(ut_params->ibuf);
+ ut_params->ibuf = 0;
+ }
}
/* Next session create should fail */
@@ -3805,92 +3502,6 @@ test_multi_session(void)
}
static int
-test_not_in_place_crypto(void)
-{
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
- struct rte_mbuf *dst_m = rte_pktmbuf_alloc(ts_params->mbuf_pool);
-
- test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(ut_params);
-
- /* Create multiple crypto sessions*/
-
- ut_params->sess = rte_cryptodev_sym_session_create(
- ts_params->valid_devs[0], &ut_params->auth_xform);
-
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
-
-
- /* Generate test mbuf data and digest */
- ut_params->ibuf = setup_test_string(ts_params->mbuf_pool,
- (const char *)
- catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
- QUOTE_512_BYTES, 0);
-
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- DIGEST_BYTE_LENGTH_SHA512);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
-
- rte_memcpy(ut_params->digest,
- catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA512_digest,
- DIGEST_BYTE_LENGTH_SHA512);
-
- /* Generate Crypto op data structure */
- ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
- TEST_ASSERT_NOT_NULL(ut_params->op,
- "Failed to allocate symmetric crypto operation struct");
-
-
- /* Set crypto operation data parameters */
- rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
-
- struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
-
- /* set crypto operation source mbuf */
- sym_op->m_src = ut_params->ibuf;
- sym_op->m_dst = dst_m;
-
- sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, QUOTE_512_BYTES);
- sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
-
- sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->auth.data.length = QUOTE_512_BYTES;
-
-
- sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, 0);
- sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
-
- sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- sym_op->cipher.data.length = QUOTE_512_BYTES;
-
- /* Process crypto operation */
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
- TEST_ASSERT_NOT_NULL(ut_params->op, "no crypto operation returned");
-
- TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
- "crypto operation processing failed");
-
- /* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->sym->m_dst, char *),
- catch_22_quote,
- QUOTE_512_BYTES,
- "Plaintext data not as expected");
-
- return TEST_SUCCESS;
-}
-
-static int
test_null_cipher_only_operation(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -4260,30 +3871,7 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_multi_session),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA1_encrypt_digest_oop),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA1_decrypt_digest_oop_ver),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA1_encrypt_digest),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA1_decrypt_digest_verify),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA256_encrypt_digest),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA256_decrypt_digest_verify),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA512_encrypt_digest),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA512_decrypt_digest_verify),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_AES_XCBC_encrypt_digest),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_stats),
/** AES GCM Authenticated Encryption */
@@ -4371,31 +3959,7 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
.setup = testsuite_setup,
.teardown = testsuite_teardown,
.unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA1_encrypt_digest),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA1_decrypt_digest_verify),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA256_encrypt_digest),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA256_decrypt_digest_verify),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA512_encrypt_digest),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA512_decrypt_digest_verify),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_AES_XCBC_encrypt_digest),
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless),
-
- TEST_CASE_ST(ut_setup, ut_teardown,
- test_not_in_place_crypto),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_mb_all),
TEST_CASES_END() /**< NULL terminate unit test array */
}
@@ -4442,6 +4006,64 @@ static struct unit_test_suite cryptodev_aesni_gcm_testsuite = {
}
};
+static struct unit_test_suite cryptodev_sw_kasumi_testsuite = {
+ .suite_name = "Crypto Device SW KASUMI Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ /** KASUMI encrypt only (UEA1) */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_5),
+ /** KASUMI decrypt only (UEA1) */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_5),
+
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1_oop),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1_oop),
+
+ /** KASUMI hash only (UIA1) */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_5),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static struct unit_test_suite cryptodev_sw_snow3g_testsuite = {
.suite_name = "Crypto Device SW Snow3G Unit Test Suite",
.setup = testsuite_setup,
@@ -4459,6 +4081,13 @@ static struct unit_test_suite cryptodev_sw_snow3g_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_encryption_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1_oop),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_decryption_test_case_1_oop),
+
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1_offset_oop),
/** Snow3G decrypt only (UEA2) */
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -4477,12 +4106,26 @@ static struct unit_test_suite cryptodev_sw_snow3g_testsuite = {
test_snow3g_hash_generate_test_case_2),
TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_hash_generate_test_case_3),
+ /* Tests with buffers whose length is not byte-aligned */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_hash_generate_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_hash_generate_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_hash_generate_test_case_6),
TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_hash_verify_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_hash_verify_test_case_2),
TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_hash_verify_test_case_3),
+ /* Tests with buffers whose length is not byte-aligned */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_hash_verify_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_hash_verify_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_hash_verify_test_case_6),
TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_authenticated_encryption_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -4577,8 +4220,22 @@ static struct test_command cryptodev_sw_snow3g_cmd = {
.callback = test_cryptodev_sw_snow3g,
};
+static int
+test_cryptodev_sw_kasumi(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_cryptodev_type = RTE_CRYPTODEV_KASUMI_PMD;
+
+ return unit_test_suite_runner(&cryptodev_sw_kasumi_testsuite);
+}
+
+static struct test_command cryptodev_sw_kasumi_cmd = {
+ .command = "cryptodev_sw_kasumi_autotest",
+ .callback = test_cryptodev_sw_kasumi,
+};
+
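+/*
+ * Once registered below, the new suite is run from the test app
+ * prompt with the "cryptodev_sw_kasumi_autotest" command (assuming
+ * the KASUMI PMD has been built and instantiated).
+ */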
REGISTER_TEST_COMMAND(cryptodev_qat_cmd);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_cmd);
REGISTER_TEST_COMMAND(cryptodev_aesni_gcm_cmd);
REGISTER_TEST_COMMAND(cryptodev_null_cmd);
REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_cmd);
+REGISTER_TEST_COMMAND(cryptodev_sw_kasumi_cmd);
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 6059a01c..3c37c323 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -46,7 +46,7 @@
#define DEFAULT_BURST_SIZE (64)
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
-#define MBUF_CACHE_SIZE (250)
+#define MBUF_CACHE_SIZE (256)
#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
@@ -61,10 +61,13 @@
#define DIGEST_BYTE_LENGTH_SHA512 (BYTE_LENGTH(512))
#define DIGEST_BYTE_LENGTH_AES_XCBC (BYTE_LENGTH(96))
#define DIGEST_BYTE_LENGTH_SNOW3G_UIA2 (BYTE_LENGTH(32))
+#define DIGEST_BYTE_LENGTH_KASUMI_F9 (BYTE_LENGTH(32))
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA384 (24)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_aes.c b/app/test/test_cryptodev_aes.c
new file mode 100644
index 00000000..bf832b62
--- /dev/null
+++ b/app/test/test_cryptodev_aes.c
@@ -0,0 +1,683 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "test.h"
+#include "test_cryptodev_aes.h"
+
+#ifndef AES_TEST_MSG_LEN
+#define AES_TEST_MSG_LEN 256
+#endif
+
+#define AES_TEST_OP_ENCRYPT 0x01
+#define AES_TEST_OP_DECRYPT 0x02
+#define AES_TEST_OP_AUTH_GEN 0x04
+#define AES_TEST_OP_AUTH_VERIFY 0x08
+
+#define AES_TEST_FEATURE_OOP 0x01
+#define AES_TEST_FEATURE_SESSIONLESS 0x02
+#define AES_TEST_FEATURE_STOPPER 0x04 /* stop upon failing */
+
+#define AES_TEST_TARGET_PMD_MB 0x0001 /* Multi-buffer flag */
+#define AES_TEST_TARGET_PMD_QAT 0x0002 /* QAT flag */
+
+#define AES_TEST_OP_CIPHER (AES_TEST_OP_ENCRYPT | \
+ AES_TEST_OP_DECRYPT)
+
+#define AES_TEST_OP_AUTH (AES_TEST_OP_AUTH_GEN | \
+ AES_TEST_OP_AUTH_VERIFY)
+
+#define AES_TEST_OP_ENC_AUTH_GEN (AES_TEST_OP_ENCRYPT | \
+ AES_TEST_OP_AUTH_GEN)
+
+#define AES_TEST_OP_AUTH_VERIFY_DEC (AES_TEST_OP_DECRYPT | \
+ AES_TEST_OP_AUTH_VERIFY)
+
+struct aes_test_case {
+ const char *test_descr; /* test description */
+ const struct aes_test_data *test_data;
+ uint8_t op_mask; /* operation mask */
+ uint8_t feature_mask;
+ uint32_t pmd_mask;
+};
+
+static const struct aes_test_case aes_test_cases[] = {
+ {
+ .test_descr = "AES-128-CTR HMAC-SHA1 Encryption Digest",
+ .test_data = &aes_test_data_1,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
+ "Verify",
+ .test_data = &aes_test_data_1,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-192-CTR XCBC Encryption Digest",
+ .test_data = &aes_test_data_2,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-192-CTR XCBC Decryption Digest Verify",
+ .test_data = &aes_test_data_2,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-256-CTR HMAC-SHA1 Encryption Digest",
+ .test_data = &aes_test_data_3,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
+ "Verify",
+ .test_data = &aes_test_data_3,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
+ .test_data = &aes_test_data_4,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
+ "Verify",
+ .test_data = &aes_test_data_4,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest",
+ .test_data = &aes_test_data_5,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
+ "Verify",
+ .test_data = &aes_test_data_5,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest",
+ .test_data = &aes_test_data_6,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
+ "Sessionless",
+ .test_data = &aes_test_data_6,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .feature_mask = AES_TEST_FEATURE_SESSIONLESS,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
+ "Verify",
+ .test_data = &aes_test_data_6,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC XCBC Encryption Digest",
+ .test_data = &aes_test_data_7,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC XCBC Decryption Digest Verify",
+ .test_data = &aes_test_data_7,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB |
+ AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
+ "OOP",
+ .test_data = &aes_test_data_4,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .feature_mask = AES_TEST_FEATURE_OOP,
+ .pmd_mask = AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
+ "Verify OOP",
+ .test_data = &aes_test_data_4,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .feature_mask = AES_TEST_FEATURE_OOP,
+ .pmd_mask = AES_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA224 Encryption Digest",
+ .test_data = &aes_test_data_8,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
+ "Verify",
+ .test_data = &aes_test_data_8,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
+ .test_data = &aes_test_data_9,
+ .op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
+ "Verify",
+ .test_data = &aes_test_data_9,
+ .op_mask = AES_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = AES_TEST_TARGET_PMD_MB
+ },
+};
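+
+/*
+ * Further cases are added by appending entries of the same shape,
+ * e.g. (a sketch only -- "aes_test_data_10" is a hypothetical vector,
+ * not defined in this file):
+ *
+ *	{
+ *		.test_descr = "AES-256-CBC HMAC-SHA1 Encryption Digest",
+ *		.test_data = &aes_test_data_10,
+ *		.op_mask = AES_TEST_OP_ENC_AUTH_GEN,
+ *		.pmd_mask = AES_TEST_TARGET_PMD_MB |
+ *			AES_TEST_TARGET_PMD_QAT
+ *	},
+ */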
+
+static int
+test_AES_one_case(const struct aes_test_case *t,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mempool *op_mpool,
+ uint8_t dev_id,
+ enum rte_cryptodev_type cryptodev_type,
+ char *test_msg)
+{
+ struct rte_mbuf *ibuf = NULL;
+ struct rte_mbuf *obuf = NULL;
+ struct rte_mbuf *iobuf;
+ struct rte_crypto_sym_xform *cipher_xform = NULL;
+ struct rte_crypto_sym_xform *auth_xform = NULL;
+ struct rte_crypto_sym_xform *init_xform = NULL;
+ struct rte_crypto_sym_op *sym_op = NULL;
+ struct rte_crypto_op *op = NULL;
+ struct rte_cryptodev_sym_session *sess = NULL;
+
+ int status = TEST_SUCCESS;
+ const struct aes_test_data *tdata = t->test_data;
+ uint8_t cipher_key[tdata->cipher_key.len];
+ uint8_t auth_key[tdata->auth_key.len];
+ uint32_t buf_len = tdata->ciphertext.len;
+ uint32_t digest_len = 0;
+ char *buf_p = NULL;
+
+ if (tdata->cipher_key.len)
+ memcpy(cipher_key, tdata->cipher_key.data,
+ tdata->cipher_key.len);
+ if (tdata->auth_key.len)
+ memcpy(auth_key, tdata->auth_key.data,
+ tdata->auth_key.len);
+
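+ /* QAT is exercised with the full-length digest, while the
+  * multi-buffer PMD uses the truncated digest (see the len and
+  * truncated_len fields of struct aes_test_data) */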
+ switch (cryptodev_type) {
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
+ digest_len = tdata->digest.len;
+ break;
+ case RTE_CRYPTODEV_AESNI_MB_PMD:
+ digest_len = tdata->digest.truncated_len;
+ break;
+ default:
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u FAILED: %s",
+ __LINE__, "Unsupported PMD type");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* preparing data */
+ ibuf = rte_pktmbuf_alloc(mbuf_pool);
+ if (!ibuf) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u FAILED: %s",
+ __LINE__, "Allocation of rte_mbuf failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (t->op_mask & AES_TEST_OP_CIPHER)
+ buf_len += tdata->iv.len;
+ if (t->op_mask & AES_TEST_OP_AUTH)
+ buf_len += digest_len;
+
+ buf_p = rte_pktmbuf_append(ibuf, buf_len);
+ if (!buf_p) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u FAILED: %s",
+ __LINE__, "No room to append mbuf");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (t->op_mask & AES_TEST_OP_CIPHER) {
+ rte_memcpy(buf_p, tdata->iv.data, tdata->iv.len);
+ buf_p += tdata->iv.len;
+ }
+
+ /* Only encryption takes plaintext.data as its input;
+  * decryption, digest generation and digest verification
+  * all operate on ciphertext.data */
+ if (t->op_mask & AES_TEST_OP_ENCRYPT) {
+ rte_memcpy(buf_p, tdata->plaintext.data,
+ tdata->plaintext.len);
+ buf_p += tdata->plaintext.len;
+ } else {
+ rte_memcpy(buf_p, tdata->ciphertext.data,
+ tdata->ciphertext.len);
+ buf_p += tdata->ciphertext.len;
+ }
+
+ if (t->op_mask & AES_TEST_OP_AUTH_VERIFY)
+ rte_memcpy(buf_p, tdata->digest.data, digest_len);
+ else
+ memset(buf_p, 0, digest_len);
+
+ if (t->feature_mask & AES_TEST_FEATURE_OOP) {
+ obuf = rte_pktmbuf_alloc(mbuf_pool);
+ if (!obuf) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: %s", __LINE__,
+ "Allocation of rte_mbuf failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ buf_p = rte_pktmbuf_append(obuf, buf_len);
+ if (!buf_p) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: %s", __LINE__,
+ "No room to append mbuf");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ memset(buf_p, 0, buf_len);
+ }
+
+ /* Generate Crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (!op) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u FAILED: %s",
+ __LINE__, "Failed to allocate symmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ sym_op = op->sym;
+
+ sym_op->m_src = ibuf;
+
+ if (t->feature_mask & AES_TEST_FEATURE_OOP) {
+ sym_op->m_dst = obuf;
+ iobuf = obuf;
+ } else {
+ sym_op->m_dst = NULL;
+ iobuf = ibuf;
+ }
+
+ /* A sessionless op must have its xforms allocated with
+  * rte_crypto_op_sym_xforms_alloc(); otherwise rte_zmalloc()
+  * is used */
+ if (t->feature_mask & AES_TEST_FEATURE_SESSIONLESS) {
+ uint32_t n_xforms = 0;
+
+ if (t->op_mask & AES_TEST_OP_CIPHER)
+ n_xforms++;
+ if (t->op_mask & AES_TEST_OP_AUTH)
+ n_xforms++;
+
+ if (rte_crypto_op_sym_xforms_alloc(op, n_xforms)
+ == NULL) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: %s", __LINE__, "Failed to "
+ "allocate space for crypto transforms");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ } else {
+ cipher_xform = rte_zmalloc(NULL,
+ sizeof(struct rte_crypto_sym_xform), 0);
+
+ auth_xform = rte_zmalloc(NULL,
+ sizeof(struct rte_crypto_sym_xform), 0);
+
+ if (!cipher_xform || !auth_xform) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: %s", __LINE__, "Failed to "
+ "allocate memory for crypto transforms");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ }
+
+ /* Prepare the xforms; for a sessioned op, init_xform is set up
+  * here and passed later as a param in the
+  * rte_cryptodev_sym_session_create() call */
+ if (t->op_mask == AES_TEST_OP_ENC_AUTH_GEN) {
+ if (t->feature_mask & AES_TEST_FEATURE_SESSIONLESS) {
+ cipher_xform = op->sym->xform;
+ auth_xform = cipher_xform->next;
+ auth_xform->next = NULL;
+ } else {
+ cipher_xform->next = auth_xform;
+ auth_xform->next = NULL;
+ init_xform = cipher_xform;
+ }
+ } else if (t->op_mask == AES_TEST_OP_AUTH_VERIFY_DEC) {
+ if (t->feature_mask & AES_TEST_FEATURE_SESSIONLESS) {
+ auth_xform = op->sym->xform;
+ cipher_xform = auth_xform->next;
+ cipher_xform->next = NULL;
+ } else {
+ auth_xform->next = cipher_xform;
+ cipher_xform->next = NULL;
+ init_xform = auth_xform;
+ }
+ } else if ((t->op_mask == AES_TEST_OP_ENCRYPT) ||
+ (t->op_mask == AES_TEST_OP_DECRYPT)) {
+ if (t->feature_mask & AES_TEST_FEATURE_SESSIONLESS)
+ cipher_xform = op->sym->xform;
+ else
+ init_xform = cipher_xform;
+ cipher_xform->next = NULL;
+ } else if ((t->op_mask == AES_TEST_OP_AUTH_GEN) ||
+ (t->op_mask == AES_TEST_OP_AUTH_VERIFY)) {
+ if (t->feature_mask & AES_TEST_FEATURE_SESSIONLESS)
+ auth_xform = op->sym->xform;
+ else
+ init_xform = auth_xform;
+ auth_xform->next = NULL;
+ } else {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u FAILED: %s",
+ __LINE__, "Unrecognized operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* Configure xforms & sym_op cipher and auth data */
+ if (t->op_mask & AES_TEST_OP_CIPHER) {
+ cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform->cipher.algo = tdata->crypto_algo;
+ if (t->op_mask & AES_TEST_OP_ENCRYPT)
+ cipher_xform->cipher.op =
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ else
+ cipher_xform->cipher.op =
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ cipher_xform->cipher.key.data = cipher_key;
+ cipher_xform->cipher.key.length = tdata->cipher_key.len;
+
+ sym_op->cipher.data.offset = tdata->iv.len;
+ sym_op->cipher.data.length = tdata->ciphertext.len;
+ sym_op->cipher.iv.data = rte_pktmbuf_mtod(sym_op->m_src,
+ uint8_t *);
+ sym_op->cipher.iv.length = tdata->iv.len;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(
+ sym_op->m_src);
+ }
+
+ if (t->op_mask & AES_TEST_OP_AUTH) {
+ uint32_t auth_data_offset = 0;
+ uint32_t digest_offset = tdata->ciphertext.len;
+
+ if (t->op_mask & AES_TEST_OP_CIPHER) {
+ digest_offset += tdata->iv.len;
+ auth_data_offset += tdata->iv.len;
+ }
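+ /* (With a cipher in the chain, the mbuf was laid out above as
+  * [ IV | text | digest ], hence both offsets skip the IV.) */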
+
+ auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform->auth.algo = tdata->auth_algo;
+ auth_xform->auth.key.length = tdata->auth_key.len;
+ auth_xform->auth.key.data = auth_key;
+ auth_xform->auth.digest_length = digest_len;
+
+ if (t->op_mask & AES_TEST_OP_AUTH_GEN) {
+ auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->auth.digest.data = rte_pktmbuf_mtod_offset
+ (iobuf, uint8_t *, digest_offset);
+ sym_op->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(iobuf,
+ digest_offset);
+ } else {
+ auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+ sym_op->auth.digest.data = rte_pktmbuf_mtod_offset
+ (sym_op->m_src, uint8_t *, digest_offset);
+ sym_op->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(sym_op->m_src,
+ digest_offset);
+ }
+
+ sym_op->auth.data.offset = auth_data_offset;
+ sym_op->auth.data.length = tdata->ciphertext.len;
+ sym_op->auth.digest.length = digest_len;
+ }
+
+ /* create session for sessioned op */
+ if (!(t->feature_mask & AES_TEST_FEATURE_SESSIONLESS)) {
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ init_xform);
+ if (!sess) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* attach symmetric crypto session to crypto operations */
+ rte_crypto_op_attach_sym_session(op, sess);
+ }
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
+
+ if (!op) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ TEST_HEXDUMP(stdout, "m_src:",
+ rte_pktmbuf_mtod(sym_op->m_src, uint8_t *), buf_len);
+ if (t->feature_mask & AES_TEST_FEATURE_OOP)
+ TEST_HEXDUMP(stdout, "m_dst:",
+ rte_pktmbuf_mtod(sym_op->m_dst, uint8_t *),
+ buf_len);
+
+ /* Verify results */
+ if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ if (t->op_mask & AES_TEST_OP_AUTH_VERIFY)
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: Digest verification failed "
+ "(0x%X)", __LINE__, op->status);
+ else
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: Operation processing failed "
+ "(0x%X)", __LINE__, op->status);
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (t->op_mask & AES_TEST_OP_CIPHER) {
+ uint8_t *crypto_res;
+ const uint8_t *compare_ref;
+ uint32_t compare_len;
+
+ crypto_res = rte_pktmbuf_mtod_offset(iobuf, uint8_t *,
+ tdata->iv.len);
+
+ if (t->op_mask & AES_TEST_OP_ENCRYPT) {
+ compare_ref = tdata->ciphertext.data;
+ compare_len = tdata->ciphertext.len;
+ } else {
+ compare_ref = tdata->plaintext.data;
+ compare_len = tdata->plaintext.len;
+ }
+
+ if (memcmp(crypto_res, compare_ref, compare_len)) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: %s", __LINE__,
+ "Crypto data not as expected");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ }
+
+ if (t->op_mask & AES_TEST_OP_AUTH_GEN) {
+ uint8_t *auth_res;
+
+ if (t->op_mask & AES_TEST_OP_CIPHER)
+ auth_res = rte_pktmbuf_mtod_offset(iobuf,
+ uint8_t *,
+ tdata->iv.len + tdata->ciphertext.len);
+ else
+ auth_res = rte_pktmbuf_mtod_offset(iobuf,
+ uint8_t *, tdata->ciphertext.len);
+
+ if (memcmp(auth_res, tdata->digest.data, digest_len)) {
+ snprintf(test_msg, AES_TEST_MSG_LEN, "line %u "
+ "FAILED: %s", __LINE__, "Generated "
+ "digest data not as expected");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ }
+
+ snprintf(test_msg, AES_TEST_MSG_LEN, "PASS");
+
+error_exit:
+ if (!(t->feature_mask & AES_TEST_FEATURE_SESSIONLESS)) {
+ if (sess)
+ rte_cryptodev_sym_session_free(dev_id, sess);
+ if (cipher_xform)
+ rte_free(cipher_xform);
+ if (auth_xform)
+ rte_free(auth_xform);
+ }
+
+ if (op)
+ rte_crypto_op_free(op);
+
+ if (obuf)
+ rte_pktmbuf_free(obuf);
+
+ if (ibuf)
+ rte_pktmbuf_free(ibuf);
+
+ return status;
+}
+
+int
+test_AES_all_tests(struct rte_mempool *mbuf_pool,
+ struct rte_mempool *op_mpool,
+ uint8_t dev_id,
+ enum rte_cryptodev_type cryptodev_type)
+{
+ int status, overall_status = TEST_SUCCESS;
+ uint32_t i, test_index = 0;
+ char test_msg[AES_TEST_MSG_LEN + 1];
+ uint32_t n_test_cases = sizeof(aes_test_cases) /
+ sizeof(aes_test_cases[0]);
+ uint32_t target_pmd_mask = 0;
+
+ switch (cryptodev_type) {
+ case RTE_CRYPTODEV_AESNI_MB_PMD:
+ target_pmd_mask = AES_TEST_TARGET_PMD_MB;
+ break;
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
+ target_pmd_mask = AES_TEST_TARGET_PMD_QAT;
+ break;
+ default:
+ TEST_ASSERT(0, "Unrecognized cryptodev type");
+ break;
+ }
+
+ for (i = 0; i < n_test_cases; i++) {
+ const struct aes_test_case *tc = &aes_test_cases[i];
+
+ if (!(tc->pmd_mask & target_pmd_mask))
+ continue;
+
+ status = test_AES_one_case(tc, mbuf_pool, op_mpool,
+ dev_id, cryptodev_type, test_msg);
+
+ printf(" %u) TestCase %s %s\n", test_index++,
+ tc->test_descr, test_msg);
+
+ if (status != TEST_SUCCESS) {
+ if (overall_status == TEST_SUCCESS)
+ overall_status = status;
+
+ if (tc->feature_mask & AES_TEST_FEATURE_STOPPER)
+ break;
+ }
+ }
+
+ return overall_status;
+}
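+
+/*
+ * The per-PMD entry points referenced from the test suites in
+ * test_cryptodev.c (test_AES_mb_all() and test_AES_qat_all(), not part
+ * of this hunk) are expected to be thin wrappers of roughly this shape
+ * -- a sketch only, assuming the testsuite_params globals used
+ * throughout test_cryptodev.c:
+ *
+ *	static int
+ *	test_AES_qat_all(void)
+ *	{
+ *		struct crypto_testsuite_params *ts_params = &testsuite_params;
+ *
+ *		return test_AES_all_tests(ts_params->mbuf_pool,
+ *				ts_params->op_mpool, ts_params->valid_devs[0],
+ *				RTE_CRYPTODEV_QAT_SYM_PMD);
+ *	}
+ */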
diff --git a/app/test/test_cryptodev_aes.h b/app/test/test_cryptodev_aes.h
new file mode 100644
index 00000000..ef518e0a
--- /dev/null
+++ b/app/test/test_cryptodev_aes.h
@@ -0,0 +1,1124 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_CRYPTODEV_AES_H_
+#define TEST_CRYPTODEV_AES_H_
+
+struct aes_test_data {
+ enum rte_crypto_cipher_algorithm crypto_algo;
+
+ struct {
+ uint8_t data[64];
+ unsigned len;
+ } cipher_key;
+
+ struct {
+ uint8_t data[64] __rte_aligned(16);
+ unsigned len;
+ } iv;
+
+ struct {
+ uint8_t data[2048];
+ unsigned len;
+ } plaintext;
+
+ struct {
+ uint8_t data[2048];
+ unsigned len;
+ } ciphertext;
+
+ enum rte_crypto_auth_algorithm auth_algo;
+
+ struct {
+ uint8_t data[128];
+ unsigned len;
+ } auth_key;
+
+ struct {
+ uint8_t data[128];
+		unsigned len;		/* full digest length (used by the QAT PMD) */
+		unsigned truncated_len;	/* truncated digest length (used by the MB PMD) */
+ } digest;
+};
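+
+/*
+ * Note (sketch, not part of the vectors): the two digest lengths exist
+ * because PMDs differ in how much of the HMAC they emit. A caller is
+ * expected to pick roughly as follows:
+ *
+ *	digest_len = (cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) ?
+ *		tdata->digest.len : tdata->digest.truncated_len;
+ */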
+
+int
+test_AES_all_tests(struct rte_mempool *mbuf_pool,
+ struct rte_mempool *op_mpool,
+ uint8_t dev_id,
+ enum rte_cryptodev_type cryptodev_type);
+
+/* test vectors */
+/** AES-128-CTR SHA1 test vector */
+static const struct aes_test_data aes_test_data_1 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .cipher_key = {
+ .data = {
+ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
+ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x6B, 0xC1, 0xBE, 0xE2, 0x2E, 0x40, 0x9F, 0x96,
+ 0xE9, 0x3D, 0x7E, 0x11, 0x73, 0x93, 0x17, 0x2A,
+ 0xAE, 0x2D, 0x8A, 0x57, 0x1E, 0x03, 0xAC, 0x9C,
+ 0x9E, 0xB7, 0x6F, 0xAC, 0x45, 0xAF, 0x8E, 0x51,
+ 0x30, 0xC8, 0x1C, 0x46, 0xA3, 0x5C, 0xE4, 0x11,
+ 0xE5, 0xFB, 0xC1, 0x19, 0x1A, 0x0A, 0x52, 0xEF,
+ 0xF6, 0x9F, 0x24, 0x45, 0xDF, 0x4F, 0x9B, 0x17,
+ 0xAD, 0x2B, 0x41, 0x7B, 0xE6, 0x6C, 0x37, 0x10
+ },
+ .len = 64
+ },
+ .ciphertext = {
+ .data = {
+ 0x87, 0x4D, 0x61, 0x91, 0xB6, 0x20, 0xE3, 0x26,
+ 0x1B, 0xEF, 0x68, 0x64, 0x99, 0x0D, 0xB6, 0xCE,
+ 0x98, 0x06, 0xF6, 0x6B, 0x79, 0x70, 0xFD, 0xFF,
+ 0x86, 0x17, 0x18, 0x7B, 0xB9, 0xFF, 0xFD, 0xFF,
+ 0x5A, 0xE4, 0xDF, 0x3E, 0xDB, 0xD5, 0xD3, 0x5E,
+ 0x5B, 0x4F, 0x09, 0x02, 0x0D, 0xB0, 0x3E, 0xAB,
+ 0x1E, 0x03, 0x1D, 0xDA, 0x2F, 0xBE, 0x03, 0xD1,
+ 0x79, 0x21, 0x70, 0xA0, 0xF3, 0x00, 0x9C, 0xEE
+ },
+ .len = 64
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .auth_key = {
+ .data = {
+ 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
+ 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
+ 0xDE, 0xF4, 0xDE, 0xAD
+ },
+ .len = 20
+ },
+ .digest = {
+ .data = {
+ 0x9B, 0x6F, 0x0C, 0x43, 0xF5, 0xC1, 0x3E, 0xB0,
+ 0xB1, 0x70, 0xB8, 0x2B, 0x33, 0x09, 0xD2, 0xB2,
+ 0x56, 0x20, 0xFB, 0xFE
+ },
+ .len = 20,
+ .truncated_len = 12
+ }
+};
+
+/** AES-192-CTR XCBC test vector */
+static const struct aes_test_data aes_test_data_2 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .cipher_key = {
+ .data = {
+ 0xCB, 0xC5, 0xED, 0x5B, 0xE7, 0x7C, 0xBD, 0x8C,
+ 0x50, 0xD9, 0x30, 0xF2, 0xB5, 0x6A, 0x0E, 0x5F,
+ 0xAA, 0xAE, 0xAD, 0xA2, 0x1F, 0x49, 0x52, 0xD4
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0x3F, 0x69, 0xA8, 0xCD, 0xE8, 0xF0, 0xEF, 0x40,
+ 0xB8, 0x7A, 0x4B, 0xED, 0x2B, 0xAF, 0xBF, 0x57
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x01, 0x0F, 0x10, 0x1F, 0x20, 0x1C, 0x0E, 0xB8,
+ 0xFB, 0x5C, 0xCD, 0xCC, 0x1F, 0xF9, 0xAF, 0x0B,
+ 0x95, 0x03, 0x74, 0x99, 0x49, 0xE7, 0x62, 0x55,
+ 0xDA, 0xEA, 0x13, 0x20, 0x1D, 0xC6, 0xCC, 0xCC,
+ 0xD1, 0x70, 0x75, 0x47, 0x02, 0x2F, 0xFB, 0x86,
+ 0xBB, 0x6B, 0x23, 0xD2, 0xC9, 0x74, 0xD7, 0x7B,
+ 0x08, 0x03, 0x3B, 0x79, 0x39, 0xBB, 0x91, 0x29,
+ 0xDA, 0x14, 0x39, 0x8D, 0xFF, 0x81, 0x50, 0x96,
+ },
+ .len = 64
+ },
+ .ciphertext = {
+ .data = {
+ 0x4A, 0x6C, 0xC8, 0xCC, 0x96, 0x2A, 0x13, 0x84,
+ 0x1C, 0x36, 0x88, 0xE9, 0xE5, 0x94, 0x70, 0xB2,
+ 0x14, 0x5B, 0x13, 0x80, 0xEA, 0xD8, 0x8D, 0x37,
+ 0xFD, 0x70, 0xA8, 0x83, 0xE8, 0x2B, 0x88, 0x1E,
+ 0xBA, 0x94, 0x3F, 0xF6, 0xB3, 0x1F, 0xDE, 0x34,
+ 0xF3, 0x5B, 0x80, 0xE9, 0xAB, 0xF5, 0x1C, 0x29,
+ 0xB6, 0xD9, 0x76, 0x2B, 0x06, 0xC6, 0x74, 0xF1,
+ 0x59, 0x5E, 0x9E, 0xA5, 0x7B, 0x2D, 0xD7, 0xF0
+ },
+ .len = 64
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .auth_key = {
+ .data = {
+ 0x87, 0x61, 0x54, 0x53, 0xC4, 0x6D, 0xDD, 0x51,
+ 0xE1, 0x9F, 0x86, 0x64, 0x39, 0x0A, 0xE6, 0x59
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {
+ 0xCA, 0x33, 0xB3, 0x3B, 0x16, 0x94, 0xAA, 0x55,
+ 0x36, 0x6B, 0x45, 0x46
+ },
+ .len = 12,
+ .truncated_len = 12
+ }
+};
+
+/** AES-256-CTR SHA1 test vector */
+static const struct aes_test_data aes_test_data_3 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .cipher_key = {
+ .data = {
+ 0x60, 0x3D, 0xEB, 0x10, 0x15, 0xCA, 0x71, 0xBE,
+ 0x2B, 0x73, 0xAE, 0xF0, 0x85, 0x7D, 0x77, 0x81,
+ 0x1F, 0x35, 0x2C, 0x07, 0x3B, 0x61, 0x08, 0xD7,
+ 0x2D, 0x98, 0x10, 0xA3, 0x09, 0x14, 0xDF, 0xF4
+ },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x6B, 0xC1, 0xBE, 0xE2, 0x2E, 0x40, 0x9F, 0x96,
+ 0xE9, 0x3D, 0x7E, 0x11, 0x73, 0x93, 0x17, 0x2A,
+ 0xAE, 0x2D, 0x8A, 0x57, 0x1E, 0x03, 0xAC, 0x9C,
+ 0x9E, 0xB7, 0x6F, 0xAC, 0x45, 0xAF, 0x8E, 0x51,
+ 0x30, 0xC8, 0x1C, 0x46, 0xA3, 0x5C, 0xE4, 0x11,
+ 0xE5, 0xFB, 0xC1, 0x19, 0x1A, 0x0A, 0x52, 0xEF,
+ 0xF6, 0x9F, 0x24, 0x45, 0xDF, 0x4F, 0x9B, 0x17,
+ 0xAD, 0x2B, 0x41, 0x7B, 0xE6, 0x6C, 0x37, 0x10
+ },
+ .len = 64
+ },
+ .ciphertext = {
+ .data = {
+ 0x60, 0x1E, 0xC3, 0x13, 0x77, 0x57, 0x89, 0xA5,
+ 0xB7, 0xA7, 0xF5, 0x04, 0xBB, 0xF3, 0xD2, 0x28,
+ 0xF4, 0x43, 0xE3, 0xCA, 0x4D, 0x62, 0xB5, 0x9A,
+ 0xCA, 0x84, 0xE9, 0x90, 0xCA, 0xCA, 0xF5, 0xC5,
+ 0x2B, 0x09, 0x30, 0xDA, 0xA2, 0x3D, 0xE9, 0x4C,
+ 0xE8, 0x70, 0x17, 0xBA, 0x2D, 0x84, 0x98, 0x8D,
+ 0xDF, 0xC9, 0xC5, 0x8D, 0xB6, 0x7A, 0xAD, 0xA6,
+ 0x13, 0xC2, 0xDD, 0x08, 0x45, 0x79, 0x41, 0xA6
+ },
+ .len = 64
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .auth_key = {
+ .data = {
+ 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
+ 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
+ 0xDE, 0xF4, 0xDE, 0xAD
+ },
+ .len = 20
+ },
+ .digest = {
+ .data = {
+ 0x3B, 0x1A, 0x9D, 0x82, 0x35, 0xD5, 0xDD, 0x64,
+ 0xCC, 0x1B, 0xA9, 0xC0, 0xEB, 0xE9, 0x42, 0x16,
+ 0xE7, 0x87, 0xA3, 0xEF
+ },
+ .len = 20,
+ .truncated_len = 12
+ }
+};
+
+/** AES-128-CBC SHA1 test vector */
+static const struct aes_test_data aes_test_data_4 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ "What a lousy earth! He wondered how many people "
+ "were destitute that same night even in his own "
+ "prosperous country, how many homes were "
+ "shanties, how many husbands were drunk and "
+ "wives socked, and how many children were "
+ "bullied, abused, or abandoned. How many "
+ "families hungered for food they could not "
+ "afford to buy? How many hearts were broken? How "
+ "many suicides would take place that same night, "
+ "how many people would go insane? How many "
+ "cockroaches and landlords would triumph? How "
+ "many winners were losers, successes failures, "
+ "and rich men poor men? How many wise guys were "
+ "stupid? How many happy endings were unhappy "
+ "endings? How many honest men were liars, brave "
+ "men cowards, loyal men traitors, how many "
+ "sainted men were corrupt, how many people in "
+ "positions of trust had sold their souls to "
+ "bodyguards, how many had never had souls? How "
+ "many straight-and-narrow paths were crooked "
+ "paths? How many best families were worst "
+ "families and how many good people were bad "
+ "people? When you added them all up and then "
+ "subtracted, you might be left with only the "
+ "children, and perhaps with Albert Einstein and "
+ "an old violinist or sculptor somewhere."
+ },
+ .len = 512
+ },
+ .ciphertext = {
+ .data = {
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
+ },
+ .len = 512
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .auth_key = {
+ .data = {
+ 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
+ 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
+ 0xDE, 0xF4, 0xDE, 0xAD
+ },
+ .len = 20
+ },
+ .digest = {
+ .data = {
+ 0x9A, 0x4F, 0x88, 0x1B, 0xB6, 0x8F, 0xD8, 0x60,
+ 0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
+ 0x18, 0x8C, 0x1D, 0x32
+ },
+ .len = 20,
+ .truncated_len = 12
+ }
+};
+
+/** AES-128-CBC SHA256 test vector */
+static const struct aes_test_data aes_test_data_5 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ "What a lousy earth! He wondered how many people "
+ "were destitute that same night even in his own "
+ "prosperous country, how many homes were "
+ "shanties, how many husbands were drunk and "
+ "wives socked, and how many children were "
+ "bullied, abused, or abandoned. How many "
+ "families hungered for food they could not "
+ "afford to buy? How many hearts were broken? How "
+ "many suicides would take place that same night, "
+ "how many people would go insane? How many "
+ "cockroaches and landlords would triumph? How "
+ "many winners were losers, successes failures, "
+ "and rich men poor men? How many wise guys were "
+ "stupid? How many happy endings were unhappy "
+ "endings? How many honest men were liars, brave "
+ "men cowards, loyal men traitors, how many "
+ "sainted men were corrupt, how many people in "
+ "positions of trust had sold their souls to "
+ "bodyguards, how many had never had souls? How "
+ "many straight-and-narrow paths were crooked "
+ "paths? How many best families were worst "
+ "families and how many good people were bad "
+ "people? When you added them all up and then "
+ "subtracted, you might be left with only the "
+ "children, and perhaps with Albert Einstein and "
+ "an old violinist or sculptor somewhere."
+ },
+ .len = 512
+ },
+ .ciphertext = {
+ .data = {
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
+ },
+ .len = 512
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .auth_key = {
+ .data = {
+ 0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x9A, 0x4F, 0x88, 0x1B, 0xB6, 0x8F, 0xD8, 0x60
+ },
+ .len = 32
+ },
+ .digest = {
+ .data = {
+ 0xC8, 0x57, 0x57, 0x31, 0x03, 0xE0, 0x03, 0x55,
+ 0x07, 0xC8, 0x9E, 0x7F, 0x48, 0x9A, 0x61, 0x9A,
+ 0x68, 0xEE, 0x03, 0x0E, 0x71, 0x75, 0xC7, 0xF4,
+ 0x2E, 0x45, 0x26, 0x32, 0x7C, 0x12, 0x15, 0x15
+ },
+ .len = 32,
+ .truncated_len = 16
+ }
+};
+
+/** AES-128-CBC SHA512 test vector */
+static const struct aes_test_data aes_test_data_6 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ "What a lousy earth! He wondered how many people "
+ "were destitute that same night even in his own "
+ "prosperous country, how many homes were "
+ "shanties, how many husbands were drunk and "
+ "wives socked, and how many children were "
+ "bullied, abused, or abandoned. How many "
+ "families hungered for food they could not "
+ "afford to buy? How many hearts were broken? How "
+ "many suicides would take place that same night, "
+ "how many people would go insane? How many "
+ "cockroaches and landlords would triumph? How "
+ "many winners were losers, successes failures, "
+ "and rich men poor men? How many wise guys were "
+ "stupid? How many happy endings were unhappy "
+ "endings? How many honest men were liars, brave "
+ "men cowards, loyal men traitors, how many "
+ "sainted men were corrupt, how many people in "
+ "positions of trust had sold their souls to "
+ "bodyguards, how many had never had souls? How "
+ "many straight-and-narrow paths were crooked "
+ "paths? How many best families were worst "
+ "families and how many good people were bad "
+ "people? When you added them all up and then "
+ "subtracted, you might be left with only the "
+ "children, and perhaps with Albert Einstein and "
+ "an old violinist or sculptor somewhere."
+ },
+ .len = 512
+ },
+ .ciphertext = {
+ .data = {
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
+ },
+ .len = 512
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .auth_key = {
+ .data = {
+ 0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x65, 0x1C, 0x42, 0x50, 0x76,
+ 0x9A, 0xAF, 0x88, 0x1B, 0xB6, 0x8F, 0xF8, 0x60,
+ 0xA2, 0x5A, 0x7F, 0x3F, 0xF4, 0x72, 0x70, 0xF1,
+ 0xF5, 0x35, 0x4C, 0x3B, 0xDD, 0x90, 0x65, 0xB0,
+ 0x47, 0x3A, 0x75, 0x61, 0x5C, 0xA2, 0x10, 0x76,
+ 0x9A, 0xAF, 0x77, 0x5B, 0xB6, 0x7F, 0xF7, 0x60
+ },
+ .len = 64
+ },
+ .digest = {
+ .data = {
+ 0x5D, 0x54, 0x66, 0xC1, 0x6E, 0xBC, 0x04, 0xB8,
+ 0x46, 0xB8, 0x08, 0x6E, 0xE0, 0xF0, 0x43, 0x48,
+ 0x37, 0x96, 0x9C, 0xC6, 0x9C, 0xC2, 0x1E, 0xE8,
+ 0xF2, 0x0C, 0x0B, 0xEF, 0x86, 0xA2, 0xE3, 0x70,
+ 0x95, 0xC8, 0xB3, 0x06, 0x47, 0xA9, 0x90, 0xE8,
+ 0xA0, 0xC6, 0x72, 0x69, 0x05, 0xC0, 0x0D, 0x0E,
+ 0x21, 0x96, 0x65, 0x93, 0x74, 0x43, 0x2A, 0x1D,
+ 0x2E, 0xBF, 0xC2, 0xC2, 0xEE, 0xCC, 0x2F, 0x0A
+ },
+ .len = 64,
+ .truncated_len = 32
+ }
+};
+
+/** AES-128-CBC XCBC test vector */
+static const struct aes_test_data aes_test_data_7 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ "What a lousy earth! He wondered how many people "
+ "were destitute that same night even in his own "
+ "prosperous country, how many homes were "
+ "shanties, how many husbands were drunk and "
+ "wives socked, and how many children were "
+ "bullied, abused, or abandoned. How many "
+ "families hungered for food they could not "
+ "afford to buy? How many hearts were broken? How "
+ "many suicides would take place that same night, "
+ "how many people would go insane? How many "
+ "cockroaches and landlords would triumph? How "
+ "many winners were losers, successes failures, "
+ "and rich men poor men? How many wise guys were "
+ "stupid? How many happy endings were unhappy "
+ "endings? How many honest men were liars, brave "
+ "men cowards, loyal men traitors, how many "
+ "sainted men were corrupt, how many people in "
+ "positions of trust had sold their souls to "
+ "bodyguards, how many had never had souls? How "
+ "many straight-and-narrow paths were crooked "
+ "paths? How many best families were worst "
+ "families and how many good people were bad "
+ "people? When you added them all up and then "
+ "subtracted, you might be left with only the "
+ "children, and perhaps with Albert Einstein and "
+ "an old violinist or sculptor somewhere."
+ },
+ .len = 512
+ },
+ .ciphertext = {
+ .data = {
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
+ },
+ .len = 512
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .auth_key = {
+ .data = {
+ 0x87, 0x61, 0x54, 0x53, 0xC4, 0x6D, 0xDD, 0x51,
+ 0xE1, 0x9F, 0x86, 0x64, 0x39, 0x0A, 0xE6, 0x59
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {
+ 0xE0, 0xAC, 0x9A, 0xC4, 0x22, 0x64, 0x35, 0x89,
+ 0x77, 0x1D, 0x8B, 0x75
+ },
+ .len = 12,
+ .truncated_len = 12
+ }
+};
+
+/** AES-128-CBC SHA224 test vector */
+static const struct aes_test_data aes_test_data_8 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ "What a lousy earth! He wondered how many people "
+ "were destitute that same night even in his own "
+ "prosperous country, how many homes were "
+ "shanties, how many husbands were drunk and "
+ "wives socked, and how many children were "
+ "bullied, abused, or abandoned. How many "
+ "families hungered for food they could not "
+ "afford to buy? How many hearts were broken? How "
+ "many suicides would take place that same night, "
+ "how many people would go insane? How many "
+ "cockroaches and landlords would triumph? How "
+ "many winners were losers, successes failures, "
+ "and rich men poor men? How many wise guys were "
+ "stupid? How many happy endings were unhappy "
+ "endings? How many honest men were liars, brave "
+ "men cowards, loyal men traitors, how many "
+ "sainted men were corrupt, how many people in "
+ "positions of trust had sold their souls to "
+ "bodyguards, how many had never had souls? How "
+ "many straight-and-narrow paths were crooked "
+ "paths? How many best families were worst "
+ "families and how many good people were bad "
+ "people? When you added them all up and then "
+ "subtracted, you might be left with only the "
+ "children, and perhaps with Albert Einstein and "
+ "an old violinist or sculptor somewhere."
+ },
+ .len = 512
+ },
+ .ciphertext = {
+ .data = {
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
+ },
+ .len = 512
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .auth_key = {
+ .data = {
+ 0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x65, 0x1C, 0x42, 0x50, 0x76,
+ 0x9A, 0xAF, 0x88, 0x1B, 0xB6, 0x8F, 0xF8, 0x60,
+ 0xA2, 0x5A, 0x7F, 0x3F, 0xF4, 0x72, 0x70, 0xF1,
+ 0xF5, 0x35, 0x4C, 0x3B, 0xDD, 0x90, 0x65, 0xB0,
+ 0x47, 0x3A, 0x75, 0x61, 0x5C, 0xA2, 0x10, 0x76,
+ 0x9A, 0xAF, 0x77, 0x5B, 0xB6, 0x7F, 0xF7, 0x60
+ },
+ .len = 64
+ },
+ .digest = {
+ .data = {
+ 0xA3, 0xCA, 0xC7, 0x1D, 0xA8, 0x61, 0x30, 0x98,
+ 0x3B, 0x8F, 0x01, 0x19, 0xAE, 0x8D, 0xBD, 0x34,
+ 0x40, 0x63, 0xA8, 0x2F, 0xDF, 0x85, 0x2B, 0x7F,
+ 0x63, 0x7C, 0xDD, 0xB7
+ },
+ .len = 28,
+ .truncated_len = 14
+ }
+};
+
+/** AES-128-CBC SHA384 test vector */
+static const struct aes_test_data aes_test_data_9 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ "What a lousy earth! He wondered how many people "
+ "were destitute that same night even in his own "
+ "prosperous country, how many homes were "
+ "shanties, how many husbands were drunk and "
+ "wives socked, and how many children were "
+ "bullied, abused, or abandoned. How many "
+ "families hungered for food they could not "
+ "afford to buy? How many hearts were broken? How "
+ "many suicides would take place that same night, "
+ "how many people would go insane? How many "
+ "cockroaches and landlords would triumph? How "
+ "many winners were losers, successes failures, "
+ "and rich men poor men? How many wise guys were "
+ "stupid? How many happy endings were unhappy "
+ "endings? How many honest men were liars, brave "
+ "men cowards, loyal men traitors, how many "
+ "sainted men were corrupt, how many people in "
+ "positions of trust had sold their souls to "
+ "bodyguards, how many had never had souls? How "
+ "many straight-and-narrow paths were crooked "
+ "paths? How many best families were worst "
+ "families and how many good people were bad "
+ "people? When you added them all up and then "
+ "subtracted, you might be left with only the "
+ "children, and perhaps with Albert Einstein and "
+ "an old violinist or sculptor somewhere."
+ },
+ .len = 512
+ },
+ .ciphertext = {
+ .data = {
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
+ },
+ .len = 512
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .auth_key = {
+ .data = {
+ 0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x65, 0x1C, 0x42, 0x50, 0x76,
+ 0x9A, 0xAF, 0x88, 0x1B, 0xB6, 0x8F, 0xF8, 0x60,
+ 0xA2, 0x5A, 0x7F, 0x3F, 0xF4, 0x72, 0x70, 0xF1,
+ 0xF5, 0x35, 0x4C, 0x3B, 0xDD, 0x90, 0x65, 0xB0,
+ 0x47, 0x3A, 0x75, 0x61, 0x5C, 0xA2, 0x10, 0x76,
+ 0x9A, 0xAF, 0x77, 0x5B, 0xB6, 0x7F, 0xF7, 0x60,
+ 0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x65, 0x1C, 0x42, 0x50, 0x76,
+ 0x9A, 0xAF, 0x88, 0x1B, 0xB6, 0x8F, 0xF8, 0x60,
+ 0xA2, 0x5A, 0x7F, 0x3F, 0xF4, 0x72, 0x70, 0xF1,
+ 0xF5, 0x35, 0x4C, 0x3B, 0xDD, 0x90, 0x65, 0xB0,
+ 0x47, 0x3A, 0x75, 0x61, 0x5C, 0xA2, 0x10, 0x76,
+ 0x9A, 0xAF, 0x77, 0x5B, 0xB6, 0x7F, 0xF7, 0x60
+ },
+ .len = 128
+ },
+ .digest = {
+ .data = {
+ 0x23, 0x60, 0xC8, 0xB1, 0x2D, 0x6C, 0x1E, 0x72,
+ 0x25, 0xAB, 0xF9, 0xC3, 0x9A, 0xA9, 0x4F, 0x8C,
+ 0x56, 0x38, 0x65, 0x0E, 0x74, 0xD5, 0x45, 0x9D,
+ 0xA3, 0xFD, 0x7E, 0x6D, 0x9E, 0x74, 0x88, 0x9D,
+ 0xA7, 0x12, 0x9D, 0xD8, 0x81, 0x3C, 0x86, 0x2F,
+ 0x4D, 0xF9, 0x6F, 0x0A, 0xB0, 0xC9, 0xEB, 0x0B
+ },
+ .len = 48,
+ .truncated_len = 24
+ }
+};
+
+#endif /* TEST_CRYPTODEV_AES_H_ */
diff --git a/app/test/test_cryptodev_kasumi_hash_test_vectors.h b/app/test/test_cryptodev_kasumi_hash_test_vectors.h
new file mode 100644
index 00000000..c080b9f5
--- /dev/null
+++ b/app/test/test_cryptodev_kasumi_hash_test_vectors.h
@@ -0,0 +1,260 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_CRYPTODEV_KASUMI_HASH_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_KASUMI_HASH_TEST_VECTORS_H_
+
+struct kasumi_hash_test_data {
+ struct {
+ uint8_t data[16];
+ unsigned len;
+ } key;
+
+ /* Includes: COUNT (4 bytes) and FRESH (4 bytes) */
+ struct {
+ uint8_t data[8];
+ unsigned len;
+ } aad;
+
+	/* Includes the message and DIRECTION (1 bit), followed by a single
+	 * 1 bit and enough 0 bits so the total length is a multiple of 64 bits */
+ struct {
+ uint8_t data[2056];
+		unsigned len; /* length must be in bits */
+ } plaintext;
+
+ /* Actual length of data to be hashed */
+ struct {
+ unsigned len;
+ } validAuthLenInBits;
+
+ struct {
+ unsigned len;
+ } validAuthOffsetLenInBits;
+
+ struct {
+ uint8_t data[64];
+ unsigned len;
+ } digest;
+};
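+
+/*
+ * Layout of the hashed buffer implied by the fields above (KASUMI f9):
+ *
+ *	aad:       COUNT (4 bytes) || FRESH (4 bytes)
+ *	plaintext: MESSAGE || DIRECTION (1 bit) || 1 || 0...0
+ *	           (zero-padded to a multiple of 64 bits)
+ */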
+
+struct kasumi_hash_test_data kasumi_hash_test_case_1 = {
+ .key = {
+ .data = {
+ 0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00,
+ 0x95, 0x2C, 0x49, 0x10, 0x48, 0x81, 0xFF, 0x48
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x6B, 0x22, 0x77, 0x37, 0x29, 0x6F, 0x39, 0x3C,
+ 0x80, 0x79, 0x35, 0x3E, 0xDC, 0x87, 0xE2, 0xE8,
+ 0x05, 0xD2, 0xEC, 0x49, 0xA4, 0xF2, 0xD8, 0xE2
+ },
+ .len = 192
+ },
+ .validAuthLenInBits = {
+ .len = 189
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 64
+ },
+ .digest = {
+ .data = {0xF6, 0x3B, 0xD7, 0x2C},
+ .len = 4
+ }
+};
+
+struct kasumi_hash_test_data kasumi_hash_test_case_2 = {
+ .key = {
+ .data = {
+ 0xD4, 0x2F, 0x68, 0x24, 0x28, 0x20, 0x1C, 0xAF,
+ 0xCD, 0x9F, 0x97, 0x94, 0x5E, 0x6D, 0xE7, 0xB7
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2,
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xB5, 0x92, 0x43, 0x84, 0x32, 0x8A, 0x4A, 0xE0,
+ 0x0B, 0x73, 0x71, 0x09, 0xF8, 0xB6, 0xC8, 0xDD,
+ 0x2B, 0x4D, 0xB6, 0x3D, 0xD5, 0x33, 0x98, 0x1C,
+ 0xEB, 0x19, 0xAA, 0xD5, 0x2A, 0x5B, 0x2B, 0xC3
+ },
+ .len = 256
+ },
+ .validAuthLenInBits = {
+ .len = 254
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 64
+ },
+ .digest = {
+ .data = {0xA9, 0xDA, 0xF1, 0xFF},
+ .len = 4
+ }
+};
+
+struct kasumi_hash_test_data kasumi_hash_test_case_3 = {
+ .key = {
+ .data = {
+ 0xFD, 0xB9, 0xCF, 0xDF, 0x28, 0x93, 0x6C, 0xC4,
+ 0x83, 0xA3, 0x18, 0x69, 0xD8, 0x1B, 0x8F, 0xAB
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A,
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x59, 0x32, 0xBC, 0x0A, 0xCE, 0x2B, 0x0A, 0xBA,
+ 0x33, 0xD8, 0xAC, 0x18, 0x8A, 0xC5, 0x4F, 0x34,
+ 0x6F, 0xAD, 0x10, 0xBF, 0x9D, 0xEE, 0x29, 0x20,
+ 0xB4, 0x3B, 0xD0, 0xC5, 0x3A, 0x91, 0x5C, 0xB7,
+ 0xDF, 0x6C, 0xAA, 0x72, 0x05, 0x3A, 0xBF, 0xF3,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 384
+ },
+ .validAuthLenInBits = {
+ .len = 319
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 64
+ },
+ .digest = {
+ .data = {0x15, 0x37, 0xD3, 0x16},
+ .len = 4
+ }
+};
+
+struct kasumi_hash_test_data kasumi_hash_test_case_4 = {
+ .key = {
+ .data = {
+ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9,
+ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x14, 0x79, 0x3E, 0x41, 0x03, 0x97, 0xE8, 0xFD
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xD0, 0xA7, 0xD4, 0x63, 0xDF, 0x9F, 0xB2, 0xB2,
+ 0x78, 0x83, 0x3F, 0xA0, 0x2E, 0x23, 0x5A, 0xA1,
+ 0x72, 0xBD, 0x97, 0x0C, 0x14, 0x73, 0xE1, 0x29,
+ 0x07, 0xFB, 0x64, 0x8B, 0x65, 0x99, 0xAA, 0xA0,
+ 0xB2, 0x4A, 0x03, 0x86, 0x65, 0x42, 0x2B, 0x20,
+ 0xA4, 0x99, 0x27, 0x6A, 0x50, 0x42, 0x70, 0x09,
+ 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 448
+ },
+ .validAuthLenInBits = {
+ .len = 384
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 64
+ },
+ .digest = {
+ .data = {0xDD, 0x7D, 0xFA, 0xDD },
+ .len = 4
+ }
+};
+
+struct kasumi_hash_test_data kasumi_hash_test_case_5 = {
+ .key = {
+ .data = {
+ 0xF4, 0xEB, 0xEC, 0x69, 0xE7, 0x3E, 0xAF, 0x2E,
+ 0xB2, 0xCF, 0x6A, 0xF4, 0xB3, 0x12, 0x0F, 0xFD
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x29, 0x6F, 0x39, 0x3C, 0x6B, 0x22, 0x77, 0x37,
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x10, 0xBF, 0xFF, 0x83, 0x9E, 0x0C, 0x71, 0x65,
+ 0x8D, 0xBB, 0x2D, 0x17, 0x07, 0xE1, 0x45, 0x72,
+ 0x4F, 0x41, 0xC1, 0x6F, 0x48, 0xBF, 0x40, 0x3C,
+ 0x3B, 0x18, 0xE3, 0x8F, 0xD5, 0xD1, 0x66, 0x3B,
+ 0x6F, 0x6D, 0x90, 0x01, 0x93, 0xE3, 0xCE, 0xA8,
+ 0xBB, 0x4F, 0x1B, 0x4F, 0x5B, 0xE8, 0x22, 0x03,
+ 0x22, 0x32, 0xA7, 0x8D, 0x7D, 0x75, 0x23, 0x8D,
+ 0x5E, 0x6D, 0xAE, 0xCD, 0x3B, 0x43, 0x22, 0xCF,
+ 0x59, 0xBC, 0x7E, 0xA8, 0x4A, 0xB1, 0x88, 0x11,
+ 0xB5, 0xBF, 0xB7, 0xBC, 0x55, 0x3F, 0x4F, 0xE4,
+ 0x44, 0x78, 0xCE, 0x28, 0x7A, 0x14, 0x87, 0x99,
+ 0x90, 0xD1, 0x8D, 0x12, 0xCA, 0x79, 0xD2, 0xC8,
+ 0x55, 0x14, 0x90, 0x21, 0xCD, 0x5C, 0xE8, 0xCA,
+ 0x03, 0x71, 0xCA, 0x04, 0xFC, 0xCE, 0x14, 0x3E,
+ 0x3D, 0x7C, 0xFE, 0xE9, 0x45, 0x85, 0xB5, 0x88,
+ 0x5C, 0xAC, 0x46, 0x06, 0x8B, 0xC0, 0x00, 0x00
+ },
+ .len = 1024
+ },
+ .validAuthLenInBits = {
+ .len = 1000
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 64
+ },
+ .digest = {
+ .data = {0xC3, 0x83, 0x83, 0x9D},
+ .len = 4
+ }
+};
+#endif /* TEST_CRYPTODEV_KASUMI_HASH_TEST_VECTORS_H_ */
diff --git a/app/test/test_cryptodev_kasumi_test_vectors.h b/app/test/test_cryptodev_kasumi_test_vectors.h
new file mode 100644
index 00000000..9163d7c5
--- /dev/null
+++ b/app/test/test_cryptodev_kasumi_test_vectors.h
@@ -0,0 +1,308 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_CRYPTODEV_KASUMI_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_KASUMI_TEST_VECTORS_H_
+
+struct kasumi_test_data {
+ struct {
+ uint8_t data[64];
+ unsigned len;
+ } key;
+
+ struct {
+ uint8_t data[64] __rte_aligned(16);
+ unsigned len;
+ } iv;
+
+ struct {
+ uint8_t data[1024];
+		unsigned len; /* length must be in bits */
+ } plaintext;
+
+ struct {
+ uint8_t data[1024];
+		unsigned len; /* length must be in bits */
+ } ciphertext;
+
+ struct {
+ unsigned len;
+ } validCipherLenInBits;
+
+ struct {
+ unsigned len;
+ } validCipherOffsetLenInBits;
+};
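+
+/*
+ * Note (assumption, per the KASUMI f8 spec): the 8-byte IV packs
+ * COUNT (32 bits) || BEARER (5 bits) || DIRECTION (1 bit) || 26 zero
+ * bits, which is why every vector's IV ends in zero bytes.
+ */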
+
+struct kasumi_test_data kasumi_test_case_1 = {
+ .key = {
+ .data = {
+ 0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00,
+ 0x95, 0x2C, 0x49, 0x10, 0x48, 0x81, 0xFF, 0x48
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x7E, 0xC6, 0x12, 0x72, 0x74, 0x3B, 0xF1, 0x61,
+ 0x47, 0x26, 0x44, 0x6A, 0x6C, 0x38, 0xCE, 0xD1,
+ 0x66, 0xF6, 0xCA, 0x76, 0xEB, 0x54, 0x30, 0x04,
+ 0x42, 0x86, 0x34, 0x6C, 0xEF, 0x13, 0x0F, 0x92,
+ 0x92, 0x2B, 0x03, 0x45, 0x0D, 0x3A, 0x99, 0x75,
+ 0xE5, 0xBD, 0x2E, 0xA0, 0xEB, 0x55, 0xAD, 0x8E,
+ 0x1B, 0x19, 0x9E, 0x3E, 0xC4, 0x31, 0x60, 0x20,
+ 0xE9, 0xA1, 0xB2, 0x85, 0xE7, 0x62, 0x79, 0x53,
+ 0x59, 0xB7, 0xBD, 0xFD, 0x39, 0xBE, 0xF4, 0xB2,
+ 0x48, 0x45, 0x83, 0xD5, 0xAF, 0xE0, 0x82, 0xAE,
+ 0xE6, 0x38, 0xBF, 0x5F, 0xD5, 0xA6, 0x06, 0x19,
+ 0x39, 0x01, 0xA0, 0x8F, 0x4A, 0xB4, 0x1A, 0xAB,
+ 0x9B, 0x13, 0x48, 0x80
+ },
+ .len = 800
+ },
+ .ciphertext = {
+ .data = {
+ 0xD1, 0xE2, 0xDE, 0x70, 0xEE, 0xF8, 0x6C, 0x69,
+ 0x64, 0xFB, 0x54, 0x2B, 0xC2, 0xD4, 0x60, 0xAA,
+ 0xBF, 0xAA, 0x10, 0xA4, 0xA0, 0x93, 0x26, 0x2B,
+ 0x7D, 0x19, 0x9E, 0x70, 0x6F, 0xC2, 0xD4, 0x89,
+ 0x15, 0x53, 0x29, 0x69, 0x10, 0xF3, 0xA9, 0x73,
+ 0x01, 0x26, 0x82, 0xE4, 0x1C, 0x4E, 0x2B, 0x02,
+ 0xBE, 0x20, 0x17, 0xB7, 0x25, 0x3B, 0xBF, 0x93,
+ 0x09, 0xDE, 0x58, 0x19, 0xCB, 0x42, 0xE8, 0x19,
+ 0x56, 0xF4, 0xC9, 0x9B, 0xC9, 0x76, 0x5C, 0xAF,
+ 0x53, 0xB1, 0xD0, 0xBB, 0x82, 0x79, 0x82, 0x6A,
+ 0xDB, 0xBC, 0x55, 0x22, 0xE9, 0x15, 0xC1, 0x20,
+ 0xA6, 0x18, 0xA5, 0xA7, 0xF5, 0xE8, 0x97, 0x08,
+ 0x93, 0x39, 0x65, 0x0F
+ },
+ .len = 800
+ },
+ .validCipherLenInBits = {
+ .len = 798
+ },
+ .validCipherOffsetLenInBits = {
+ .len = 64
+ },
+};
+
+struct kasumi_test_data kasumi_test_case_2 = {
+ .key = {
+ .data = {
+ 0xEF, 0xA8, 0xB2, 0x22, 0x9E, 0x72, 0x0C, 0x2A,
+ 0x7C, 0x36, 0xEA, 0x55, 0xE9, 0x60, 0x56, 0x95
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xE2, 0x8B, 0xCF, 0x7B, 0xC0, 0x00, 0x00, 0x00
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x10, 0x11, 0x12, 0x31, 0xE0, 0x60, 0x25, 0x3A,
+ 0x43, 0xFD, 0x3F, 0x57, 0xE3, 0x76, 0x07, 0xAB,
+ 0x28, 0x27, 0xB5, 0x99, 0xB6, 0xB1, 0xBB, 0xDA,
+ 0x37, 0xA8, 0xAB, 0xCC, 0x5A, 0x8C, 0x55, 0x0D,
+ 0x1B, 0xFB, 0x2F, 0x49, 0x46, 0x24, 0xFB, 0x50,
+ 0x36, 0x7F, 0xA3, 0x6C, 0xE3, 0xBC, 0x68, 0xF1,
+ 0x1C, 0xF9, 0x3B, 0x15, 0x10, 0x37, 0x6B, 0x02,
+ 0x13, 0x0F, 0x81, 0x2A, 0x9F, 0xA1, 0x69, 0xD8
+ },
+ .len = 512
+ },
+ .ciphertext = {
+ .data = {
+ 0x3D, 0xEA, 0xCC, 0x7C, 0x15, 0x82, 0x1C, 0xAA,
+ 0x89, 0xEE, 0xCA, 0xDE, 0x9B, 0x5B, 0xD3, 0x61,
+ 0x4B, 0xD0, 0xC8, 0x41, 0x9D, 0x71, 0x03, 0x85,
+ 0xDD, 0xBE, 0x58, 0x49, 0xEF, 0x1B, 0xAC, 0x5A,
+ 0xE8, 0xB1, 0x4A, 0x5B, 0x0A, 0x67, 0x41, 0x52,
+ 0x1E, 0xB4, 0xE0, 0x0B, 0xB9, 0xEC, 0xF3, 0xE9,
+ 0xF7, 0xCC, 0xB9, 0xCA, 0xE7, 0x41, 0x52, 0xD7,
+ 0xF4, 0xE2, 0xA0, 0x34, 0xB6, 0xEA, 0x00, 0xEC
+ },
+ .len = 512
+ },
+ .validCipherLenInBits = {
+ .len = 510
+ },
+ .validCipherOffsetLenInBits = {
+ .len = 64
+ }
+};
+
+struct kasumi_test_data kasumi_test_case_3 = {
+ .key = {
+ .data = {
+ 0x5A, 0xCB, 0x1D, 0x64, 0x4C, 0x0D, 0x51, 0x20,
+ 0x4E, 0xA5, 0xF1, 0x45, 0x10, 0x10, 0xD8, 0x52
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xAD, 0x9C, 0x44, 0x1F, 0x89, 0x0B, 0x38, 0xC4,
+ 0x57, 0xA4, 0x9D, 0x42, 0x14, 0x07, 0xE8
+ },
+ .len = 120
+ },
+ .ciphertext = {
+ .data = {
+ 0x9B, 0xC9, 0x2C, 0xA8, 0x03, 0xC6, 0x7B, 0x28,
+ 0xA1, 0x1A, 0x4B, 0xEE, 0x5A, 0x0C, 0x25
+ },
+ .len = 120
+ },
+ .validCipherLenInBits = {
+ .len = 120
+ },
+ .validCipherOffsetLenInBits = {
+ .len = 64
+ }
+};
+
+struct kasumi_test_data kasumi_test_case_4 = {
+ .key = {
+ .data = {
+ 0xD3, 0xC5, 0xD5, 0x92, 0x32, 0x7F, 0xB1, 0x1C,
+ 0x40, 0x35, 0xC6, 0x68, 0x0A, 0xF8, 0xC6, 0xD1
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x39, 0x8A, 0x59, 0xB4, 0x2C, 0x00, 0x00, 0x00,
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB, 0x1A,
+ 0xB4, 0x85, 0x47, 0x20, 0x29, 0xB7, 0x1D, 0x80,
+ 0x8C, 0xE3, 0x3E, 0x2C, 0xC3, 0xC0, 0xB5, 0xFC,
+ 0x1F, 0x3D, 0xE8, 0xA6, 0xDC, 0x66, 0xB1, 0xF0
+ },
+ .len = 256
+ },
+ .ciphertext = {
+ .data = {
+ 0x5B, 0xB9, 0x43, 0x1B, 0xB1, 0xE9, 0x8B, 0xD1,
+ 0x1B, 0x93, 0xDB, 0x7C, 0x3D, 0x45, 0x13, 0x65,
+ 0x59, 0xBB, 0x86, 0xA2, 0x95, 0xAA, 0x20, 0x4E,
+ 0xCB, 0xEB, 0xF6, 0xF7, 0xA5, 0x10, 0x15, 0x10
+ },
+ .len = 256
+ },
+ .validCipherLenInBits = {
+ .len = 253
+ },
+ .validCipherOffsetLenInBits = {
+ .len = 64
+ }
+};
+
+struct kasumi_test_data kasumi_test_case_5 = {
+ .key = {
+ .data = {
+ 0x60, 0x90, 0xEA, 0xE0, 0x4C, 0x83, 0x70, 0x6E,
+ 0xEC, 0xBF, 0x65, 0x2B, 0xE8, 0xE3, 0x65, 0x66
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x72, 0xA4, 0xF2, 0x0F, 0x48, 0x00, 0x00, 0x00
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0x40, 0x98, 0x1B, 0xA6, 0x82, 0x4C, 0x1B, 0xFB,
+ 0x42, 0x86, 0xB2, 0x99, 0x78, 0x3D, 0xAF, 0x44,
+ 0x2C, 0x09, 0x9F, 0x7A, 0xB0, 0xF5, 0x8D, 0x5C,
+ 0x8E, 0x46, 0xB1, 0x04, 0xF0, 0x8F, 0x01, 0xB4,
+ 0x1A, 0xB4, 0x85, 0x47, 0x20, 0x29, 0xB7, 0x1D,
+ 0x36, 0xBD, 0x1A, 0x3D, 0x90, 0xDC, 0x3A, 0x41,
+ 0xB4, 0x6D, 0x51, 0x67, 0x2A, 0xC4, 0xC9, 0x66,
+ 0x3A, 0x2B, 0xE0, 0x63, 0xDA, 0x4B, 0xC8, 0xD2,
+ 0x80, 0x8C, 0xE3, 0x3E, 0x2C, 0xCC, 0xBF, 0xC6,
+ 0x34, 0xE1, 0xB2, 0x59, 0x06, 0x08, 0x76, 0xA0,
+ 0xFB, 0xB5, 0xA4, 0x37, 0xEB, 0xCC, 0x8D, 0x31,
+ 0xC1, 0x9E, 0x44, 0x54, 0x31, 0x87, 0x45, 0xE3,
+ 0x98, 0x76, 0x45, 0x98, 0x7A, 0x98, 0x6F, 0x2C,
+ 0xB0
+ },
+ .len = 840
+ },
+ .ciphertext = {
+ .data = {
+ 0xDD, 0xB3, 0x64, 0xDD, 0x2A, 0xAE, 0xC2, 0x4D,
+ 0xFF, 0x29, 0x19, 0x57, 0xB7, 0x8B, 0xAD, 0x06,
+ 0x3A, 0xC5, 0x79, 0xCD, 0x90, 0x41, 0xBA, 0xBE,
+ 0x89, 0xFD, 0x19, 0x5C, 0x05, 0x78, 0xCB, 0x9F,
+ 0xDE, 0x42, 0x17, 0x56, 0x61, 0x78, 0xD2, 0x02,
+ 0x40, 0x20, 0x6D, 0x07, 0xCF, 0xA6, 0x19, 0xEC,
+ 0x05, 0x9F, 0x63, 0x51, 0x44, 0x59, 0xFC, 0x10,
+ 0xD4, 0x2D, 0xC9, 0x93, 0x4E, 0x56, 0xEB, 0xC0,
+ 0xCB, 0xC6, 0x0D, 0x4D, 0x2D, 0xF1, 0x74, 0x77,
+ 0x4C, 0xBD, 0xCD, 0x5D, 0xA4, 0xA3, 0x50, 0x31,
+ 0x7A, 0x7F, 0x12, 0xE1, 0x94, 0x94, 0x71, 0xF8,
+ 0xA2, 0x95, 0xF2, 0x72, 0xE6, 0x8F, 0xC0, 0x71,
+ 0x59, 0xB0, 0x7D, 0x8E, 0x2D, 0x26, 0xE4, 0x59,
+ 0x9E
+ },
+ .len = 840
+ },
+ .validCipherLenInBits = {
+ .len = 837
+ },
+ .validCipherOffsetLenInBits = {
+ .len = 64
+ },
+};
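+
+/*
+ * Note: the plaintext/ciphertext .len fields above are bit counts, and
+ * validCipherLenInBits may be a few bits short of the full buffer
+ * (e.g. 837 of 840 bits), per the KASUMI F8 test set.
+ */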
+
+#endif /* TEST_CRYPTODEV_KASUMI_TEST_VECTORS_H_ */
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index b3f4fd9c..d7282112 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -38,7 +38,6 @@
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
-#include <rte_hexdump.h>
#include "test.h"
#include "test_cryptodev.h"
@@ -58,6 +57,25 @@ struct crypto_testsuite_params {
uint8_t dev_id;
};
+enum chain_mode {
+ CIPHER_HASH,
+ HASH_CIPHER,
+ CIPHER_ONLY,
+ HASH_ONLY
+};
+
+struct perf_test_params {
+ unsigned total_operations;
+ unsigned burst_size;
+ unsigned buf_size;
+
+ enum chain_mode chain;
+
+ enum rte_crypto_cipher_algorithm cipher_algo;
+ unsigned cipher_key_length;
+ enum rte_crypto_auth_algorithm auth_algo;
+};
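+
+/*
+ * Example (values are illustrative only, not one of the parameter sets
+ * registered below): a chained cipher+hash run is described as
+ *
+ *   struct perf_test_params p = {
+ *       .total_operations = 1000000,
+ *       .burst_size = 32,
+ *       .buf_size = 1024,
+ *       .chain = CIPHER_HASH,
+ *       .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ *       .cipher_key_length = 16,
+ *       .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ *   };
+ */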
#define MAX_NUM_OF_OPS_PER_UT (128)
@@ -75,6 +93,98 @@ struct crypto_unittest_params {
uint8_t *digest;
};
+static struct rte_cryptodev_sym_session *
+test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
+ enum rte_crypto_cipher_algorithm cipher_algo, unsigned cipher_key_len,
+ enum rte_crypto_auth_algorithm auth_algo);
+static struct rte_mbuf *
+test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz);
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
+ struct rte_cryptodev_sym_session *sess, unsigned data_len,
+ unsigned digest_len);
+static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo);
+
+
+static const char *chain_mode_name(enum chain_mode mode)
+{
+ switch (mode) {
+ case CIPHER_HASH: return "cipher_hash";
+ case HASH_CIPHER: return "hash_cipher";
+ case CIPHER_ONLY: return "cipher_only";
+ case HASH_ONLY: return "hash_only";
+ default: return "";
+ }
+}
+
+static const char *pmd_name(enum rte_cryptodev_type pmd)
+{
+ switch (pmd) {
+ case RTE_CRYPTODEV_NULL_PMD: return CRYPTODEV_NAME_NULL_PMD;
+ case RTE_CRYPTODEV_AESNI_GCM_PMD:
+ return CRYPTODEV_NAME_AESNI_GCM_PMD;
+ case RTE_CRYPTODEV_AESNI_MB_PMD:
+ return CRYPTODEV_NAME_AESNI_MB_PMD;
+ case RTE_CRYPTODEV_QAT_SYM_PMD:
+ return CRYPTODEV_NAME_QAT_SYM_PMD;
+ case RTE_CRYPTODEV_SNOW3G_PMD:
+ return CRYPTODEV_NAME_SNOW3G_PMD;
+ default:
+ return "";
+ }
+}
+
+static const char *cipher_algo_name(enum rte_crypto_cipher_algorithm cipher_algo)
+{
+ switch (cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL: return "NULL";
+ case RTE_CRYPTO_CIPHER_3DES_CBC: return "3DES_CBC";
+ case RTE_CRYPTO_CIPHER_3DES_CTR: return "3DES_CTR";
+ case RTE_CRYPTO_CIPHER_3DES_ECB: return "3DES_ECB";
+ case RTE_CRYPTO_CIPHER_AES_CBC: return "AES_CBC";
+ case RTE_CRYPTO_CIPHER_AES_CCM: return "AES_CCM";
+ case RTE_CRYPTO_CIPHER_AES_CTR: return "AES_CTR";
+ case RTE_CRYPTO_CIPHER_AES_ECB: return "AES_ECB";
+ case RTE_CRYPTO_CIPHER_AES_F8: return "AES_F8";
+ case RTE_CRYPTO_CIPHER_AES_GCM: return "AES_GCM";
+ case RTE_CRYPTO_CIPHER_AES_XTS: return "AES_XTS";
+ case RTE_CRYPTO_CIPHER_ARC4: return "ARC4";
+ case RTE_CRYPTO_CIPHER_KASUMI_F8: return "KASUMI_F8";
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: return "SNOW3G_UEA2";
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3: return "ZUC_EEA3";
+ default: return "Another cipher algo";
+ }
+}
+
+static const char *auth_algo_name(enum rte_crypto_auth_algorithm auth_algo)
+{
+ switch (auth_algo) {
+ case RTE_CRYPTO_AUTH_NULL: return "NULL";
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC: return "AES_CBC_MAC";
+ case RTE_CRYPTO_AUTH_AES_CCM: return "AES_CCM";
+ case RTE_CRYPTO_AUTH_AES_CMAC: return "AES_CMAC";
+ case RTE_CRYPTO_AUTH_AES_GCM: return "AES_GCM";
+ case RTE_CRYPTO_AUTH_AES_GMAC: return "AES_GMAC";
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC: return "AES_XCBC_MAC";
+ case RTE_CRYPTO_AUTH_KASUMI_F9: return "KASUMI_F9";
+ case RTE_CRYPTO_AUTH_MD5: return "MD5";
+ case RTE_CRYPTO_AUTH_MD5_HMAC: return "MD5_HMAC";
+ case RTE_CRYPTO_AUTH_SHA1: return "SHA1";
+ case RTE_CRYPTO_AUTH_SHA1_HMAC: return "SHA1_HMAC";
+ case RTE_CRYPTO_AUTH_SHA224: return "SHA224";
+ case RTE_CRYPTO_AUTH_SHA224_HMAC: return "SHA224_HMAC";
+ case RTE_CRYPTO_AUTH_SHA256: return "SHA256";
+ case RTE_CRYPTO_AUTH_SHA256_HMAC: return "SHA256_HMAC";
+ case RTE_CRYPTO_AUTH_SHA384: return "SHA384";
+ case RTE_CRYPTO_AUTH_SHA384_HMAC: return "SHA384_HMAC";
+ case RTE_CRYPTO_AUTH_SHA512: return "SHA512";
+ case RTE_CRYPTO_AUTH_SHA512_HMAC: return "SHA512_HMAC";
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2: return "SNOW3G_UIA2";
+ case RTE_CRYPTO_AUTH_ZUC_EIA3: return "ZUC_EIA3";
+ default: return "Another auth algo";
+ }
+}
+
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
const uint8_t *data, size_t len, uint8_t blocksize)
@@ -97,7 +207,7 @@ setup_test_string(struct rte_mempool *mpool,
static struct crypto_testsuite_params testsuite_params = { NULL };
static struct crypto_unittest_params unittest_params;
-static enum rte_cryptodev_type gbl_cryptodev_preftest_devtype;
+static enum rte_cryptodev_type gbl_cryptodev_perftest_devtype;
static int
testsuite_setup(void)
@@ -134,7 +244,7 @@ testsuite_setup(void)
}
/* Create 2 AESNI MB devices if required */
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD) {
+ if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD) {
nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD);
if (nb_devs < 2) {
for (i = nb_devs; i < 2; i++) {
@@ -148,6 +258,21 @@ testsuite_setup(void)
}
}
+ /* Create 2 SNOW3G devices if required */
+ if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_SNOW3G_PMD) {
+ nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
+ if (nb_devs < 2) {
+ for (i = nb_devs; i < 2; i++) {
+ ret = rte_eal_vdev_init(
+ CRYPTODEV_NAME_SNOW3G_PMD, NULL);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to create instance %u of pmd : %s",
+ i, CRYPTODEV_NAME_SNOW3G_PMD);
+ }
+ }
+ }
+
nb_devs = rte_cryptodev_count();
if (nb_devs < 1) {
RTE_LOG(ERR, USER1, "No crypto devices found?");
@@ -157,7 +282,7 @@ testsuite_setup(void)
/* Search for the first valid */
for (i = 0; i < nb_devs; i++) {
rte_cryptodev_info_get(i, &info);
- if (info.dev_type == gbl_cryptodev_preftest_devtype) {
+ if (info.dev_type == gbl_cryptodev_perftest_devtype) {
ts_params->dev_id = i;
valid_dev_id = 1;
break;
@@ -218,7 +343,10 @@ testsuite_teardown(void)
if (ts_params->mbuf_mp != NULL)
RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_MBUFPOOL count %u\n",
- rte_mempool_count(ts_params->mbuf_mp));
+ rte_mempool_avail_count(ts_params->mbuf_mp));
+ if (ts_params->op_mpool != NULL)
+ RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_OP POOL count %u\n",
+ rte_mempool_avail_count(ts_params->op_mpool));
}
static int
@@ -267,7 +395,7 @@ ut_teardown(void)
if (ts_params->mbuf_mp != NULL)
RTE_LOG(DEBUG, USER1, "CRYPTO_PERF_MBUFPOOL count %u\n",
- rte_mempool_count(ts_params->mbuf_mp));
+ rte_mempool_avail_count(ts_params->mbuf_mp));
rte_cryptodev_stats_get(ts_params->dev_id, &stats);
@@ -363,12 +491,11 @@ const char plaintext_quote[] =
#define CIPHER_KEY_LENGTH_AES_CBC (16)
#define CIPHER_IV_LENGTH_AES_CBC (CIPHER_KEY_LENGTH_AES_CBC)
-
-static uint8_t aes_cbc_key[] = {
+static uint8_t aes_cbc_128_key[] = {
0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA };
-static uint8_t aes_cbc_iv[] = {
+static uint8_t aes_cbc_128_iv[] = {
0xf5, 0xd3, 0x89, 0x0f, 0x47, 0x00, 0xcb, 0x52,
0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1 };
@@ -1693,7 +1820,6 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
{ AES_CBC_ciphertext_2048B, HMAC_SHA256_ciphertext_2048B_digest } }
};
-
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
@@ -1718,7 +1844,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
+ ut_params->cipher_xform.cipher.key.data = aes_cbc_128_key;
ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
@@ -1774,7 +1900,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
+ rte_memcpy(op->sym->cipher.iv.data, aes_cbc_128_iv,
CIPHER_IV_LENGTH_AES_CBC);
op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
@@ -1829,7 +1955,7 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype ==
+ if (gbl_cryptodev_perftest_devtype ==
RTE_CRYPTODEV_AESNI_MB_PMD)
rte_cryptodev_enqueue_burst(dev_num, 0,
NULL, 0);
@@ -1857,192 +1983,906 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
static int
-test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
+test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
{
- uint16_t index;
- uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received;
- uint64_t failed_polls, retries, start_cycles, end_cycles;
- const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double throughput, mmps;
-
- struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
- struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
-
+ uint32_t num_to_submit = pparams->total_operations;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
+ uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
+ uint32_t burst_sent = 0, burst_received = 0;
+ uint32_t i, burst_size, num_sent, num_ops_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
- struct crypto_unittest_params *ut_params = &unittest_params;
- struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
+ static struct rte_cryptodev_sym_session *sess;
if (rte_cryptodev_count() == 0) {
- printf("\nNo crypto devices available. Is kernel driver loaded?\n");
+ printf("\nNo crypto devices found. Is PMD build configured?\n");
+ printf("\nAnd is kernel driver loaded for HW PMDs?\n");
return TEST_FAILED;
}
- /* Setup Cipher Parameters */
- ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- ut_params->cipher_xform.next = &ut_params->auth_xform;
+ /* Create Crypto session*/
+ sess = test_perf_create_snow3g_session(ts_params->dev_id,
+ pparams->chain, pparams->cipher_algo,
+ pparams->cipher_key_length, pparams->auth_algo);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
- ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->cipher_xform.cipher.key.data = aes_cbc_key;
- ut_params->cipher_xform.cipher.key.length = CIPHER_IV_LENGTH_AES_CBC;
+ /* Generate Crypto op data structure(s)*/
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = test_perf_create_pktmbuf(
+ ts_params->mbuf_mp,
+ pparams->buf_size);
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- /* Setup HMAC Parameters */
- ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- ut_params->auth_xform.next = NULL;
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
- ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
- ut_params->auth_xform.auth.key.data = hmac_sha256_key;
- ut_params->auth_xform.auth.key.length = HMAC_KEY_LENGTH_SHA256;
- ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA256;
+ op = test_perf_set_crypto_op_snow3g(op, m, sess, pparams->buf_size,
+ get_auth_digest_length(pparams->auth_algo));
+ TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
- /* Create Crypto session*/
- ut_params->sess = rte_cryptodev_sym_session_create(ts_params->dev_id,
- &ut_params->cipher_xform);
+ c_ops[i] = op;
+ }
- TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+ printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, auth_algo:%s, "
+ "Packet Size %u bytes",
+ pmd_name(gbl_cryptodev_perftest_devtype),
+ ts_params->dev_id, 0,
+ chain_mode_name(pparams->chain),
+ cipher_algo_name(pparams->cipher_algo),
+ auth_algo_name(pparams->auth_algo),
+ pparams->buf_size);
+ printf("\nOps Tx\tOps Rx\tOps/burst ");
+ printf("Retries EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
- printf("\nThroughput test which will continually attempt to send "
- "AES128_CBC_SHA256_HMAC requests with a constant burst "
- "size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
- printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Gbps)");
- printf("\tRetries (Attempted a burst, but the device was busy)");
- for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
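+ /* Sweep burst sizes in powers of two, from 2 up to 128 ops per burst */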
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
- num_received = 0;
+ num_ops_received = 0;
retries = 0;
failed_polls = 0;
+ burst_size = i;
+ total_cycles = 0;
+ while (num_sent < num_to_submit) {
+ start_cycles = rte_rdtsc_precise();
+ burst_sent = rte_cryptodev_enqueue_burst(ts_params->dev_id,
+ 0, &c_ops[num_sent],
+ ((num_to_submit-num_sent) < burst_size) ?
+ num_to_submit-num_sent : burst_size);
+ end_cycles = rte_rdtsc_precise();
+ if (burst_sent == 0)
+ retries++;
+ num_sent += burst_sent;
+ total_cycles += (end_cycles - start_cycles);
+
+ /* Wait until requests have been sent. */
+
+ rte_delay_ms(1);
+
+ start_cycles = rte_rdtsc_precise();
+ burst_received = rte_cryptodev_dequeue_burst(
+ ts_params->dev_id, 0, proc_ops, burst_size);
+ end_cycles = rte_rdtsc_precise();
+ if (burst_received < burst_sent)
+ failed_polls++;
+ num_ops_received += burst_received;
+
+ total_cycles += end_cycles - start_cycles;
+ }
+
+ while (num_ops_received != num_to_submit) {
+ if (gbl_cryptodev_perftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(ts_params->dev_id, 0,
+ NULL, 0);
+ start_cycles = rte_rdtsc_precise();
+ burst_received = rte_cryptodev_dequeue_burst(
+ ts_params->dev_id, 0, proc_ops, burst_size);
+ end_cycles = rte_rdtsc_precise();
+ total_cycles += end_cycles - start_cycles;
+ if (burst_received == 0)
+ failed_polls++;
+ num_ops_received += burst_received;
+ }
+
+ printf("\n%u\t%u\t%u", num_sent, num_ops_received, burst_size);
+ printf("\t\t%"PRIu64, retries);
+ printf("\t%"PRIu64, failed_polls);
+ printf("\t\t%"PRIu64, total_cycles/num_ops_received);
+ printf("\t\t%"PRIu64, (total_cycles/num_ops_received)*burst_size);
+ printf("\t\t%"PRIu64, total_cycles/(num_ops_received*pparams->buf_size));
+ }
+ printf("\n");
+
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_perf_snow3G_vary_burst_size(void)
+{
+ unsigned total_operations = 4096;
+ /* No need to vary pkt size for QAT; it should have no effect on IA cycles */
+ uint16_t buf_lengths[] = {40};
+ uint8_t i, j;
+
+ struct perf_test_params params_set[] = {
+ {
+ .chain = CIPHER_ONLY,
+ .cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ },
+ {
+ .chain = HASH_ONLY,
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .cipher_key_length = 16
+ },
+ };
+
+ printf("\n\nStart %s.", __func__);
+ printf("\nThis Test measures the average IA cycle cost using a "
+ "constant request(packet) size. ");
+ printf("Cycle cost is only valid when indicators show device is not busy,"
+ " i.e. Retries and EmptyPolls = 0");
+
+ for (i = 0; i < RTE_DIM(params_set); i++) {
+ printf("\n");
+ params_set[i].total_operations = total_operations;
+
+ for (j = 0;
+ j < RTE_DIM(buf_lengths);
+ j++) {
+
+ params_set[i].buf_size = buf_lengths[j];
+
+ test_perf_snow3G_optimise_cyclecount(&params_set[i]);
+ }
+
+ }
+
+ return 0;
+}
+
+static uint32_t get_auth_key_max_length(enum rte_crypto_auth_algorithm algo)
+{
+ switch (algo) {
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ return 16;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ return 64;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ return 64;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ return 64;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ return 128;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ return 128;
+ default:
+ return 0;
+ }
+}
+
+static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo)
+{
+ switch (algo) {
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ return 4;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ return TRUNCATED_DIGEST_BYTE_LENGTH_SHA1;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ return TRUNCATED_DIGEST_BYTE_LENGTH_SHA224;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ return TRUNCATED_DIGEST_BYTE_LENGTH_SHA256;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ return TRUNCATED_DIGEST_BYTE_LENGTH_SHA384;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ return TRUNCATED_DIGEST_BYTE_LENGTH_SHA512;
+ default:
+ return 0;
+ }
+}
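+
+/* The lengths above for the HMAC algorithms are the truncated digest sizes
+ * used by the test vectors, not the full hash output sizes. */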
+
+static uint8_t aes_cbc_key[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static uint8_t aes_cbc_iv[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static uint8_t hmac_sha_key[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static uint8_t snow3g_cipher_key[] = {
+ 0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00,
+ 0x95, 0x2C, 0x49, 0x10, 0x48, 0x81, 0xFF, 0x48
+};
+
+static uint8_t snow3g_iv[] = {
+ 0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00,
+ 0x72, 0xA4, 0xF2, 0x0F, 0x64, 0x00, 0x00, 0x00
+};
+
+static uint8_t snow3g_hash_key[] = {
+ 0xC7, 0x36, 0xC6, 0xAA, 0xB2, 0x2B, 0xFF, 0xF9,
+ 0x1E, 0x26, 0x98, 0xD2, 0xE2, 0x2A, 0xD5, 0x7E
+};
+
+static struct rte_cryptodev_sym_session *
+test_perf_create_aes_sha_session(uint8_t dev_id, enum chain_mode chain,
+ enum rte_crypto_cipher_algorithm cipher_algo,
+ unsigned cipher_key_len,
+ enum rte_crypto_auth_algorithm auth_algo)
+{
+ struct rte_crypto_sym_xform cipher_xform = { 0 };
+ struct rte_crypto_sym_xform auth_xform = { 0 };
+
+
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.cipher.algo = cipher_algo;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ cipher_xform.cipher.key.data = aes_cbc_key;
+ cipher_xform.cipher.key.length = cipher_key_len;
+
+ /* Setup HMAC Parameters */
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ auth_xform.auth.algo = auth_algo;
+
+ auth_xform.auth.key.data = hmac_sha_key;
+ auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
+ auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+
+ switch (chain) {
+ case CIPHER_HASH:
+ cipher_xform.next = &auth_xform;
+ auth_xform.next = NULL;
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ case HASH_CIPHER:
+ auth_xform.next = &cipher_xform;
+ cipher_xform.next = NULL;
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ default:
+ return NULL;
+ }
+}
+
+static struct rte_cryptodev_sym_session *
+test_perf_create_snow3g_session(uint8_t dev_id, enum chain_mode chain,
+ enum rte_crypto_cipher_algorithm cipher_algo, unsigned cipher_key_len,
+ enum rte_crypto_auth_algorithm auth_algo)
+{
+ struct rte_crypto_sym_xform cipher_xform = {0};
+ struct rte_crypto_sym_xform auth_xform = {0};
+
+
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.cipher.algo = cipher_algo;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ cipher_xform.cipher.key.data = snow3g_cipher_key;
+ cipher_xform.cipher.key.length = cipher_key_len;
+
+ /* Setup HMAC Parameters */
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ auth_xform.auth.algo = auth_algo;
+
+ auth_xform.auth.key.data = snow3g_hash_key;
+ auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
+ auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+
+ switch (chain) {
+ case CIPHER_HASH:
+ cipher_xform.next = &auth_xform;
+ auth_xform.next = NULL;
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ case HASH_CIPHER:
+ auth_xform.next = &cipher_xform;
+ cipher_xform.next = NULL;
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ case CIPHER_ONLY:
+ cipher_xform.next = NULL;
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ case HASH_ONLY:
+ auth_xform.next = NULL;
+ /* Create Crypto session */
+ return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ default:
+ return NULL;
+ }
+}
+
+#define AES_CBC_BLOCK_SIZE 16
+#define AES_CBC_CIPHER_IV_LENGTH 16
+#define SNOW3G_CIPHER_IV_LENGTH 16
+
+static struct rte_mbuf *
+test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz)
+{
+ struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
+
+ if (rte_pktmbuf_append(m, buf_sz) == NULL) {
+ rte_pktmbuf_free(m);
+ return NULL;
+ }
+
+ memset(rte_pktmbuf_mtod(m, uint8_t *), 0, buf_sz);
+
+ return m;
+}
+
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op(struct rte_crypto_op *op, struct rte_mbuf *m,
+ struct rte_cryptodev_sym_session *sess, unsigned data_len,
+ unsigned digest_len)
+{
+ if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+ rte_crypto_op_free(op);
+ return NULL;
+ }
- /* Generate Crypto op data structure(s) */
- for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf *m = setup_test_string(
- ts_params->mbuf_mp,
- (const uint8_t *)
- data_params[index].plaintext,
- data_params[index].length,
- 0);
+ /* Authentication Parameters */
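+ /* Place the digest directly after the payload in the mbuf data room */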
+ op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
+ (m->data_off + data_len);
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m, data_len);
+ op->sym->auth.digest.length = digest_len;
+ op->sym->auth.aad.data = aes_cbc_iv;
+ op->sym->auth.aad.length = AES_CBC_CIPHER_IV_LENGTH;
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
- DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest
- , "no room to append digest");
+ /* Cipher Parameters */
+ op->sym->cipher.iv.data = aes_cbc_iv;
+ op->sym->cipher.iv.length = AES_CBC_CIPHER_IV_LENGTH;
- rte_memcpy(ut_params->digest,
- data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ /* Data lengths/offsets Parameters */
+ op->sym->auth.data.offset = 0;
+ op->sym->auth.data.length = data_len;
- struct rte_crypto_op *op = rte_crypto_op_alloc(
- ts_params->op_mpool,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ op->sym->cipher.data.offset = AES_CBC_BLOCK_SIZE;
+ op->sym->cipher.data.length = data_len - AES_CBC_BLOCK_SIZE;
- rte_crypto_op_attach_sym_session(op, ut_params->sess);
+ op->sym->m_src = m;
- op->sym->auth.digest.data = ut_params->digest;
- op->sym->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(m,
- data_params[index].length);
- op->sym->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ return op;
+}
+
+static inline struct rte_crypto_op *
+test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
+ struct rte_cryptodev_sym_session *sess, unsigned data_len,
+ unsigned digest_len)
+{
+ if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+ rte_crypto_op_free(op);
+ return NULL;
+ }
+
+ /* Authentication Parameters */
+ op->sym->auth.digest.data = (uint8_t *)m->buf_addr +
+ (m->data_off + data_len);
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m, data_len);
+ op->sym->auth.digest.length = digest_len;
+ op->sym->auth.aad.data = snow3g_iv;
+ op->sym->auth.aad.length = SNOW3G_CIPHER_IV_LENGTH;
+
+ /* Cipher Parameters */
+ op->sym->cipher.iv.data = snow3g_iv;
+ op->sym->cipher.iv.length = SNOW3G_CIPHER_IV_LENGTH;
+
+ /* Data lengths/offsets Parameters */
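+ /* SNOW3G lengths are expressed in bits, hence the byte-to-bit (<< 3) shift */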
+ op->sym->auth.data.offset = 0;
+ op->sym->auth.data.length = data_len << 3;
+
+ op->sym->cipher.data.offset = 0;
+ op->sym->cipher.data.length = data_len << 3;
+
+ op->sym->m_src = m;
+
+ return op;
+}
+
+/* An mbuf set is used in each burst. An mbuf can be used by multiple bursts
+ * at the same time: the test never examines the packet contents, so an mbuf
+ * does not need to be quiesced before being re-used. */
+#define NUM_MBUF_SETS 8
+
+static int
+test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
+ struct perf_test_params *pparams)
+{
+ uint16_t i, k, l, m;
+ uint16_t j = 0;
+ uint16_t ops_unused = 0;
- op->sym->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- op->sym->auth.data.length = data_params[index].length;
+ uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+ uint64_t processed = 0, failed_polls = 0, retries = 0;
+ uint64_t tsc_start = 0, tsc_end = 0;
- op->sym->cipher.iv.data = (uint8_t *)
- rte_pktmbuf_prepend(m,
- CIPHER_IV_LENGTH_AES_CBC);
- op->sym->cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
- op->sym->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ uint16_t digest_length = get_auth_digest_length(pparams->auth_algo);
- rte_memcpy(op->sym->cipher.iv.data, aes_cbc_iv,
- CIPHER_IV_LENGTH_AES_CBC);
+ struct rte_crypto_op *ops[pparams->burst_size];
+ struct rte_crypto_op *proc_ops[pparams->burst_size];
- op->sym->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
- op->sym->cipher.data.length = data_params[index].length;
+ struct rte_mbuf *mbufs[pparams->burst_size * 8];
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
- op->sym->m_src = m;
+ static struct rte_cryptodev_sym_session *sess;
+
+ if (rte_cryptodev_count() == 0) {
+ printf("\nNo crypto devices available. Is kernel driver loaded?\n");
+ return TEST_FAILED;
+ }
- c_ops[b] = op;
+ /* Create Crypto session*/
+ sess = test_perf_create_aes_sha_session(ts_params->dev_id,
+ pparams->chain, pparams->cipher_algo,
+ pparams->cipher_key_length, pparams->auth_algo);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+ /* Generate a burst of crypto operations */
+ for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+ mbufs[i] = test_perf_create_pktmbuf(
+ ts_params->mbuf_mp,
+ pparams->buf_size);
+
+ if (mbufs[i] == NULL) {
+ printf("\nFailed to get mbuf - freeing the rest.\n");
+ for (k = 0; k < i; k++)
+ rte_pktmbuf_free(mbufs[k]);
+ return -1;
}
- start_cycles = rte_rdtsc_precise();
- while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
- num_sent) < DEFAULT_BURST_SIZE ?
- DEFAULT_NUM_REQS_TO_SUBMIT -
- num_sent : DEFAULT_BURST_SIZE;
-
- burst_sent = rte_cryptodev_enqueue_burst(
- dev_num, 0, c_ops, burst_size);
- if (burst_sent == 0)
+
+ }
+
+
+ tsc_start = rte_rdtsc_precise();
+
+ while (total_enqueued < pparams->total_operations) {
+ uint16_t burst_size =
+ total_enqueued+pparams->burst_size <= pparams->total_operations ?
+ pparams->burst_size : pparams->total_operations-total_enqueued;
+ uint16_t ops_needed = burst_size-ops_unused;
+
+ if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+ printf("\nFailed to alloc enough ops, finish dequeuing "
+ "and free ops below.");
+ } else {
+ for (i = 0; i < ops_needed; i++)
+ ops[i] = test_perf_set_crypto_op(ops[i],
+ mbufs[i + (pparams->burst_size *
+ (j % NUM_MBUF_SETS))],
+ sess, pparams->buf_size, digest_length);
+
+ /* enqueue burst */
+ burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+ queue_id, ops, burst_size);
+
+ if (burst_enqueued < burst_size)
retries++;
- else
- num_sent += burst_sent;
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
- 0, proc_ops, DEFAULT_BURST_SIZE);
- if (burst_received == 0)
- failed_polls++;
- else
- num_received += burst_received;
+ ops_unused = burst_size-burst_enqueued;
+ total_enqueued += burst_enqueued;
}
- while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype ==
- RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0,
- NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(
- dev_num, 0, proc_ops,
- DEFAULT_BURST_SIZE);
- if (burst_received == 0)
- failed_polls++;
- else
- num_received += burst_received;
+ /* dequeue burst */
+ burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+ proc_ops, pparams->burst_size);
+ if (burst_dequeued == 0)
+ failed_polls++;
+ else {
+ processed += burst_dequeued;
+
+ for (l = 0; l < burst_dequeued; l++)
+ rte_crypto_op_free(proc_ops[l]);
}
- end_cycles = rte_rdtsc_precise();
- mmps = ((double)num_received * mhz) /
- (end_cycles - start_cycles);
- throughput = (mmps * data_params[index].length * 8) / 1000;
-
- printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length,
- num_sent, num_received);
- printf("\t%.2f\t%.2f", mmps, throughput);
- printf("\t\t%"PRIu64, retries);
- for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- rte_pktmbuf_free(c_ops[b]->sym->m_src);
- rte_crypto_op_free(c_ops[b]);
+ j++;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+ while (processed < pparams->total_operations) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+ /* dequeue burst */
+ burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+ proc_ops, pparams->burst_size);
+ if (burst_dequeued == 0)
+ failed_polls++;
+ else {
+ processed += burst_dequeued;
+
+ for (m = 0; m < burst_dequeued; m++)
+ rte_crypto_op_free(proc_ops[m]);
+ }
+ }
+
+ tsc_end = rte_rdtsc_precise();
+
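+ /* ops/s scales processed ops by the TSC rate; throughput is Gbps */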
+ double ops_s = ((double)processed / (tsc_end - tsc_start)) * rte_get_tsc_hz();
+ double throughput = (ops_s * pparams->buf_size * 8) / 1000000000;
+
+ printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size, ops_s/1000000,
+ throughput, retries, failed_polls);
+
+ for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+ rte_pktmbuf_free(mbufs[i]);
+
+ printf("\n");
+ return TEST_SUCCESS;
+}
+
+static int
+test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
+ struct perf_test_params *pparams)
+{
+ uint16_t i, k, l, m;
+ uint16_t j = 0;
+ uint16_t ops_unused = 0;
+ uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+ uint64_t processed = 0, failed_polls = 0, retries = 0;
+ uint64_t tsc_start = 0, tsc_end = 0;
+
+ uint16_t digest_length = get_auth_digest_length(pparams->auth_algo);
+
+ struct rte_crypto_op *ops[pparams->burst_size];
+ struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+ struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
+
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+ static struct rte_cryptodev_sym_session *sess;
+
+ if (rte_cryptodev_count() == 0) {
+ printf("\nNo crypto devices found. Is PMD build configured?\n");
+ printf("\nAnd is kernel driver loaded for HW PMDs?\n");
+ return TEST_FAILED;
+ }
+
+ /* Create Crypto session*/
+ sess = test_perf_create_snow3g_session(ts_params->dev_id,
+ pparams->chain, pparams->cipher_algo,
+ pparams->cipher_key_length, pparams->auth_algo);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+ /* Generate a burst of crypto operations */
+ for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+ mbufs[i] = test_perf_create_pktmbuf(
+ ts_params->mbuf_mp,
+ pparams->buf_size);
+
+ if (mbufs[i] == NULL) {
+ printf("\nFailed to get mbuf - freeing the rest.\n");
+ for (k = 0; k < i; k++)
+ rte_pktmbuf_free(mbufs[k]);
+ return -1;
+ }
+
+ }
+
+ tsc_start = rte_rdtsc_precise();
+
+ while (total_enqueued < pparams->total_operations) {
+ uint16_t burst_size =
+ (total_enqueued+pparams->burst_size)
+ <= pparams->total_operations ?
+ pparams->burst_size : pparams->total_operations-total_enqueued;
+ uint16_t ops_needed = burst_size-ops_unused;
+ /* Handle the last burst correctly */
+ uint16_t op_offset = pparams->burst_size - burst_size;
+
+ if (ops_needed !=
+ rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops+op_offset, ops_needed)) {
+ printf("\nFailed to alloc enough ops.");
+ /* Don't exit; keep dequeuing, more ops should become available */
+ } else {
+ for (i = 0; i < ops_needed; i++) {
+ ops[i+op_offset] =
+ test_perf_set_crypto_op_snow3g(ops[i+op_offset],
+ mbufs[i +
+ (pparams->burst_size * (j % NUM_MBUF_SETS))],
+ sess,
+ pparams->buf_size, digest_length);
+ }
+
+ /* enqueue burst */
+ burst_enqueued =
+ rte_cryptodev_enqueue_burst(dev_id, queue_id,
+ ops+op_offset, burst_size);
+
+ if (burst_enqueued < burst_size)
+ retries++;
+
+ ops_unused = burst_size-burst_enqueued;
+ total_enqueued += burst_enqueued;
+ }
+
+ /* dequeue burst */
+ burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+ proc_ops, pparams->burst_size);
+ if (burst_dequeued == 0) {
+ failed_polls++;
+ } else {
+ processed += burst_dequeued;
+ for (l = 0; l < burst_dequeued; l++)
+ rte_crypto_op_free(proc_ops[l]);
+ }
+ j++;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+ while (processed < pparams->total_operations) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+ /* dequeue burst */
+ burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+ proc_ops, pparams->burst_size);
+ if (burst_dequeued == 0)
+ failed_polls++;
+ else {
+ processed += burst_dequeued;
+ for (m = 0; m < burst_dequeued; m++)
+ rte_crypto_op_free(proc_ops[m]);
}
}
+ tsc_end = rte_rdtsc_precise();
+
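+ /* Per-burst, per-buffer and per-byte cycle costs come from the same
+ * TSC window; throughput here is reported in Mbps */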
+ double ops_s = ((double)processed / (tsc_end - tsc_start)) * rte_get_tsc_hz();
+ double cycles_burst = (double) (tsc_end - tsc_start) /
+ (double) processed * pparams->burst_size;
+ double cycles_buff = (double) (tsc_end - tsc_start) / (double) processed;
+ double cycles_B = cycles_buff / pparams->buf_size;
+ double throughput = (ops_s * pparams->buf_size * 8) / 1000000;
+
+ if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_QAT_SYM_PMD) {
+ /* Cycle count misleading on HW devices for this test, so don't print */
+ printf("%4u\t%6.2f\t%10.2f\t n/a \t\t n/a "
+ "\t\t n/a \t\t%8"PRIu64"\t%8"PRIu64,
+ pparams->buf_size, ops_s/1000000,
+ throughput, retries, failed_polls);
+ } else {
+ printf("%4u\t%6.2f\t%10.2f\t%10.2f\t%8.2f"
+ "\t%8.2f\t%8"PRIu64"\t%8"PRIu64,
+ pparams->buf_size, ops_s/1000000, throughput, cycles_burst,
+ cycles_buff, cycles_B, retries, failed_polls);
+ }
+
+ for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+ rte_pktmbuf_free(mbufs[i]);
+
printf("\n");
return TEST_SUCCESS;
}
+/*
+
+ perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA1);
+ perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA_256);
+ perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA_512);
+
+ perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA1);
+ perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA_256);
+ perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA_512);
+
+ perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA1);
+ perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA_256);
+ perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA_512);
+ */
+static int
+test_perf_aes_cbc_encrypt_digest_vary_pkt_size(void)
+{
+ unsigned total_operations = 1000000;
+ unsigned burst_size = 32;
+ unsigned buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536, 1792, 2048 };
+ uint8_t i, j;
+
+ struct perf_test_params params_set[] = {
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+ },
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+ },
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
+ },
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 32,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+ },
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 32,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+ },
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 32,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
+ },
+ };
+
+ for (i = 0; i < RTE_DIM(params_set); i++) {
+
+ params_set[i].total_operations = total_operations;
+ params_set[i].burst_size = burst_size;
+ printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+ " burst_size: %d ops\n",
+ chain_mode_name(params_set[i].chain),
+ cipher_algo_name(params_set[i].cipher_algo),
+ auth_algo_name(params_set[i].auth_algo),
+ params_set[i].cipher_key_length,
+ burst_size);
+ printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
+ "Retries\tEmptyPolls\n");
+ for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+ params_set[i].buf_size = buf_lengths[j];
+ test_perf_aes_sha(testsuite_params.dev_id, 0,
+ &params_set[i]);
+ }
+ }
+ return 0;
+}
+
static int
-test_perf_encrypt_digest_vary_req_size(void)
+test_perf_snow3G_vary_pkt_size(void)
{
- return test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(
- testsuite_params.dev_id);
+ unsigned total_operations = 1000000;
+ uint8_t i, j;
+ unsigned k;
+ uint16_t burst_sizes[] = {64};
+ uint16_t buf_lengths[] = {40, 64, 80, 120, 240, 256, 400, 512, 600, 1024, 2048};
+
+ struct perf_test_params params_set[] = {
+ {
+ .chain = CIPHER_ONLY,
+ .cipher_algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ },
+ {
+ .chain = HASH_ONLY,
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .cipher_key_length = 16
+ },
+ };
+
+ printf("\n\nStart %s.", __func__);
+ printf("\nTest to measure max throughput at various pkt sizes.");
+ printf("\nOn HW devices t'put maximised when high Retries and EmptyPolls"
+ " so cycle cost not relevant (n/a displayed).");
+
+ for (i = 0; i < RTE_DIM(params_set); i++) {
+ printf("\n\n");
+ params_set[i].total_operations = total_operations;
+ for (k = 0; k < RTE_DIM(burst_sizes); k++) {
+ printf("\nOn %s dev%u qp%u, %s, "
+ "cipher algo:%s, auth algo:%s, burst_size: %d ops",
+ pmd_name(gbl_cryptodev_perftest_devtype),
+ testsuite_params.dev_id, 0,
+ chain_mode_name(params_set[i].chain),
+ cipher_algo_name(params_set[i].cipher_algo),
+ auth_algo_name(params_set[i].auth_algo),
+ burst_sizes[k]);
+
+ params_set[i].burst_size = burst_sizes[k];
+ printf("\nPktSzB\tOp/s(M)\tThruput(Mbps)\tCycles/Burst\t"
+ "Cycles/buf\tCycles/B\tRetries\t\tEmptyPolls\n");
+ for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+
+ params_set[i].buf_size = buf_lengths[j];
+
+ test_perf_snow3g(testsuite_params.dev_id, 0, &params_set[i]);
+ }
+ }
+ }
+
+ return 0;
}
static int
-test_perf_vary_burst_size(void)
+test_perf_aes_cbc_vary_burst_size(void)
{
return test_perf_crypto_qp_vary_burst_size(testsuite_params.dev_id);
}
-
static struct unit_test_suite cryptodev_testsuite = {
.suite_name = "Crypto Device Unit Test Suite",
.setup = testsuite_setup,
.teardown = testsuite_teardown,
.unit_test_cases = {
TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_encrypt_digest_vary_req_size),
+ test_perf_aes_cbc_encrypt_digest_vary_pkt_size),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_perf_aes_cbc_vary_burst_size),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static struct unit_test_suite cryptodev_aes_testsuite = {
+ .suite_name = "Crypto Device AESNI MB Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_perf_aes_cbc_encrypt_digest_vary_pkt_size),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static struct unit_test_suite cryptodev_snow3g_testsuite = {
+ .suite_name = "Crypto Device Snow3G Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_perf_snow3G_vary_pkt_size),
TEST_CASE_ST(ut_setup, ut_teardown,
- test_perf_vary_burst_size),
+ test_perf_snow3G_vary_burst_size),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
@@ -2050,19 +2890,35 @@ static struct unit_test_suite cryptodev_testsuite = {
static int
perftest_aesni_mb_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_AESNI_MB_PMD;
+ gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_AESNI_MB_PMD;
- return unit_test_suite_runner(&cryptodev_testsuite);
+ return unit_test_suite_runner(&cryptodev_aes_testsuite);
}
static int
perftest_qat_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
{
- gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+ gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
return unit_test_suite_runner(&cryptodev_testsuite);
}
+static int
+perftest_sw_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_SNOW3G_PMD;
+
+ return unit_test_suite_runner(&cryptodev_snow3g_testsuite);
+}
+
+static int
+perftest_qat_snow3g_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_QAT_SYM_PMD;
+
+ return unit_test_suite_runner(&cryptodev_snow3g_testsuite);
+}
+
static struct test_command cryptodev_aesni_mb_perf_cmd = {
.command = "cryptodev_aesni_mb_perftest",
.callback = perftest_aesni_mb_cryptodev,
@@ -2073,5 +2929,17 @@ static struct test_command cryptodev_qat_perf_cmd = {
.callback = perftest_qat_cryptodev,
};
+static struct test_command cryptodev_sw_snow3g_perf_cmd = {
+ .command = "cryptodev_sw_snow3g_perftest",
+ .callback = perftest_sw_snow3g_cryptodev,
+};
+
+static struct test_command cryptodev_qat_snow3g_perf_cmd = {
+ .command = "cryptodev_qat_snow3g_perftest",
+ .callback = perftest_qat_snow3g_cryptodev,
+};
+
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_perf_cmd);
REGISTER_TEST_COMMAND(cryptodev_qat_perf_cmd);
+REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_perf_cmd);
+REGISTER_TEST_COMMAND(cryptodev_qat_snow3g_perf_cmd);
diff --git a/app/test/test_cryptodev_snow3g_hash_test_vectors.h b/app/test/test_cryptodev_snow3g_hash_test_vectors.h
index fe4906bc..a8a47db5 100644
--- a/app/test/test_cryptodev_snow3g_hash_test_vectors.h
+++ b/app/test/test_cryptodev_snow3g_hash_test_vectors.h
@@ -438,4 +438,111 @@ struct snow3g_hash_test_data snow3g_hash_test_case_3 = {
}
};
+struct snow3g_hash_test_data snow3g_hash_test_case_4 = {
+ .key = {
+ .data = {
+ 0x2B, 0xD6, 0x45, 0x9F, 0x82, 0xC5, 0xB3, 0x00,
+ 0x95, 0x2C, 0x49, 0x10, 0x48, 0x81, 0xFF, 0x48
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
+ 0x38, 0xA6, 0xF0, 0x56, 0x05, 0xD2, 0xEC, 0x49,
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x6B, 0x22, 0x77, 0x37, 0x29, 0x6F, 0x39, 0x3C,
+ 0x80, 0x79, 0x35, 0x3E, 0xDC, 0x87, 0xE2, 0xE8,
+ 0x05, 0xD2, 0xEC, 0x49, 0xA4, 0xF2, 0xD8, 0xE0
+ },
+ .len = 189
+ },
+ .validAuthLenInBits = {
+ .len = 189
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0x2B, 0xCE, 0x18, 0x20},
+ .len = 4
+ }
+};
+
+struct snow3g_hash_test_data snow3g_hash_test_case_5 = {
+ .key = {
+ .data = {
+ 0xD4, 0x2F, 0x68, 0x24, 0x28, 0x20, 0x1C, 0xAF,
+ 0xCD, 0x9F, 0x97, 0x94, 0x5E, 0x6D, 0xE7, 0xB7
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x3E, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0xD8, 0xE2,
+ 0xBE, 0xDC, 0x87, 0xE2, 0xA4, 0xF2, 0x58, 0xE2
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0xB5, 0x92, 0x43, 0x84, 0x32, 0x8A, 0x4A, 0xE0,
+ 0x0B, 0x73, 0x71, 0x09, 0xF8, 0xB6, 0xC8, 0xDD,
+ 0x2B, 0x4D, 0xB6, 0x3D, 0xD5, 0x33, 0x98, 0x1C,
+ 0xEB, 0x19, 0xAA, 0xD5, 0x2A, 0x5B, 0x2B, 0xC0
+ },
+ .len = 254
+ },
+ .validAuthLenInBits = {
+ .len = 254
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0xFC, 0x7B, 0x18, 0xBD},
+ .len = 4
+ }
+};
+
+struct snow3g_hash_test_data snow3g_hash_test_case_6 = {
+ .key = {
+ .data = {
+ 0xFD, 0xB9, 0xCF, 0xDF, 0x28, 0x93, 0x6C, 0xC4,
+ 0x83, 0xA3, 0x18, 0x69, 0xD8, 0x1B, 0x8F, 0xAB
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x36, 0xAF, 0x61, 0x44, 0x98, 0x38, 0xF0, 0x3A,
+ 0xB6, 0xAF, 0x61, 0x44, 0x98, 0x38, 0x70, 0x3A
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x59, 0x32, 0xBC, 0x0A, 0xCE, 0x2B, 0x0A, 0xBA,
+ 0x33, 0xD8, 0xAC, 0x18, 0x8A, 0xC5, 0x4F, 0x34,
+ 0x6F, 0xAD, 0x10, 0xBF, 0x9D, 0xEE, 0x29, 0x20,
+ 0xB4, 0x3B, 0xD0, 0xC5, 0x3A, 0x91, 0x5C, 0xB7,
+ 0xDF, 0x6C, 0xAA, 0x72, 0x05, 0x3A, 0xBF, 0xF2
+ },
+ .len = 319
+ },
+ .validAuthLenInBits = {
+ .len = 319
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0x02, 0xF1, 0xFA, 0xAF},
+ .len = 4
+ }
+};
#endif /* TEST_CRYPTODEV_SNOW3G_HASH_TEST_VECTORS_H_ */
diff --git a/app/test/test_hash.c b/app/test/test_hash.c
index adbdb4a8..7e41725a 100644
--- a/app/test/test_hash.c
+++ b/app/test/test_hash.c
@@ -176,7 +176,7 @@ static struct rte_hash_parameters ut_params = {
.socket_id = 0,
};
-#define CRC32_ITERATIONS (1U << 20)
+#define CRC32_ITERATIONS (1U << 10)
#define CRC32_DWORDS (1U << 6)
/*
* Test if all CRC32 implementations yield the same hash value
@@ -486,7 +486,7 @@ static int test_five_keys(void)
for(i = 0; i < 5; i++)
key_array[i] = &keys[i];
- ret = rte_hash_lookup_multi(handle, &key_array[0], 5, (int32_t *)pos);
+ ret = rte_hash_lookup_bulk(handle, &key_array[0], 5, (int32_t *)pos);
if(ret == 0)
for(i = 0; i < 5; i++) {
print_key_info("Lkp", key_array[i], pos[i]);
@@ -527,7 +527,7 @@ static int test_five_keys(void)
}
/* Lookup multi */
- ret = rte_hash_lookup_multi(handle, &key_array[0], 5, (int32_t *)pos);
+ ret = rte_hash_lookup_bulk(handle, &key_array[0], 5, (int32_t *)pos);
if (ret == 0)
for (i = 0; i < 5; i++) {
print_key_info("Lkp", key_array[i], pos[i]);
@@ -1081,7 +1081,7 @@ test_hash_creation_with_good_parameters(void)
return 0;
}
-#define ITERATIONS 50
+#define ITERATIONS 3
/*
* Test to see the average table utilization (entries added/max entries)
* before hitting a random entry that cannot be added
@@ -1098,7 +1098,7 @@ static int test_average_table_utilization(void)
"\n before adding elements begins to fail\n");
printf("Measuring performance, please wait");
fflush(stdout);
- ut_params.entries = 1 << 20;
+ ut_params.entries = 1 << 16;
ut_params.name = "test_average_utilization";
ut_params.hash_func = rte_jhash;
handle = rte_hash_create(&ut_params);
@@ -1138,7 +1138,7 @@ static int test_average_table_utilization(void)
return 0;
}
-#define NUM_ENTRIES 1024
+#define NUM_ENTRIES 256
static int test_hash_iteration(void)
{
struct rte_hash *handle;
diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
new file mode 100644
index 00000000..b0f31b00
--- /dev/null
+++ b/app/test/test_hash_multiwriter.c
@@ -0,0 +1,287 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <inttypes.h>
+#include <locale.h>
+
+#include <rte_cycles.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+#include <rte_launch.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_spinlock.h>
+
+#include "test.h"
+
+/*
+ * Check condition and return an error if true. Assumes that "handle" is the
+ * name of the hash structure pointer to be freed.
+ */
+#define RETURN_IF_ERROR(cond, str, ...) do { \
+ if (cond) { \
+ printf("ERROR line %d: " str "\n", __LINE__, \
+ ##__VA_ARGS__); \
+ if (handle) \
+ rte_hash_free(handle); \
+ return -1; \
+ } \
+} while (0)
+
+#define RTE_APP_TEST_HASH_MULTIWRITER_FAILED 0
+
+struct {
+ uint32_t *keys;
+ uint32_t *found;
+ uint32_t nb_tsx_insertion;
+ struct rte_hash *h;
+} tbl_multiwriter_test_params;
+
+const uint32_t nb_entries = 16*1024*1024;
+const uint32_t nb_total_tsx_insertion = 15*1024*1024;
+
+static rte_atomic64_t gcycles;
+static rte_atomic64_t ginsertions;
+
+static int use_htm;
+
+static int
+test_hash_multiwriter_worker(__attribute__((unused)) void *arg)
+{
+ uint64_t i, offset;
+ uint32_t lcore_id = rte_lcore_id();
+ uint64_t begin, cycles;
+
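+ /* Each worker lcore inserts its own disjoint slice of the key array */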
+ offset = (lcore_id - rte_get_master_lcore())
+ * tbl_multiwriter_test_params.nb_tsx_insertion;
+
+ printf("Core #%d inserting %d: %'"PRId64" - %'"PRId64"\n",
+ lcore_id, tbl_multiwriter_test_params.nb_tsx_insertion,
+ offset, offset + tbl_multiwriter_test_params.nb_tsx_insertion);
+
+ begin = rte_rdtsc_precise();
+
+ for (i = offset;
+ i < offset + tbl_multiwriter_test_params.nb_tsx_insertion;
+ i++) {
+ if (rte_hash_add_key(tbl_multiwriter_test_params.h,
+ tbl_multiwriter_test_params.keys + i) < 0)
+ break;
+ }
+
+ cycles = rte_rdtsc_precise() - begin;
+ rte_atomic64_add(&gcycles, cycles);
+ rte_atomic64_add(&ginsertions, i - offset);
+
+ for (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)
+ tbl_multiwriter_test_params.keys[i]
+ = RTE_APP_TEST_HASH_MULTIWRITER_FAILED;
+
+ return 0;
+}
+
+
+static int
+test_hash_multiwriter(void)
+{
+ unsigned int i, rounded_nb_total_tsx_insertion;
+ static unsigned calledCount = 1;
+
+ uint32_t *keys;
+ uint32_t *found;
+
+ struct rte_hash_parameters hash_params = {
+ .entries = nb_entries,
+ .key_len = sizeof(uint32_t),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
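+ /* Multi-writer add is always requested; TSX lock elision is enabled
+ * in addition when hardware transactional memory is available */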
+ if (use_htm)
+ hash_params.extra_flag =
+ RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT
+ | RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD;
+ else
+ hash_params.extra_flag =
+ RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD;
+
+ struct rte_hash *handle;
+ char name[RTE_HASH_NAMESIZE];
+
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+
+ uint32_t duplicated_keys = 0;
+ uint32_t lost_keys = 0;
+
+ snprintf(name, sizeof(name), "test%u", calledCount++);
+ hash_params.name = name;
+
+ handle = rte_hash_create(&hash_params);
+ RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+ tbl_multiwriter_test_params.h = handle;
+ tbl_multiwriter_test_params.nb_tsx_insertion =
+ nb_total_tsx_insertion / rte_lcore_count();
+
+ rounded_nb_total_tsx_insertion = (nb_total_tsx_insertion /
+ tbl_multiwriter_test_params.nb_tsx_insertion)
+ * tbl_multiwriter_test_params.nb_tsx_insertion;
+
+ rte_srand(rte_rdtsc());
+
+ keys = rte_malloc(NULL, sizeof(uint32_t) * nb_entries, 0);
+
+ if (keys == NULL) {
+ printf("RTE_MALLOC failed\n");
+ goto err1;
+ }
+
+ found = rte_zmalloc(NULL, sizeof(uint32_t) * nb_entries, 0);
+ if (found == NULL) {
+ printf("RTE_ZMALLOC failed\n");
+ goto err2;
+ }
+
+ for (i = 0; i < nb_entries; i++)
+ keys[i] = i;
+
+ tbl_multiwriter_test_params.keys = keys;
+ tbl_multiwriter_test_params.found = found;
+
+ rte_atomic64_init(&gcycles);
+ rte_atomic64_clear(&gcycles);
+
+ rte_atomic64_init(&ginsertions);
+ rte_atomic64_clear(&ginsertions);
+
+ /* Fire all threads. */
+ rte_eal_mp_remote_launch(test_hash_multiwriter_worker,
+ NULL, CALL_MASTER);
+ rte_eal_mp_wait_lcore();
+
+ while (rte_hash_iterate(handle, &next_key, &next_data, &iter) >= 0) {
+ /* Search for the key in the list of keys added .*/
+ i = *(const uint32_t *)next_key;
+ tbl_multiwriter_test_params.found[i]++;
+ }
+
+ for (i = 0; i < rounded_nb_total_tsx_insertion; i++) {
+ if (tbl_multiwriter_test_params.keys[i]
+ != RTE_APP_TEST_HASH_MULTIWRITER_FAILED) {
+ if (tbl_multiwriter_test_params.found[i] > 1) {
+ duplicated_keys++;
+ break;
+ }
+ if (tbl_multiwriter_test_params.found[i] == 0) {
+ lost_keys++;
+ printf("key %d is lost\n", i);
+ break;
+ }
+ }
+ }
+
+ if (duplicated_keys > 0) {
+ printf("%d key duplicated\n", duplicated_keys);
+ goto err3;
+ }
+
+ if (lost_keys > 0) {
+ printf("%d key lost\n", lost_keys);
+ goto err3;
+ }
+
+ printf("No key corrupted during multiwriter insertion.\n");
+
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gcycles)/
+ rte_atomic64_read(&ginsertions);
+
+ printf(" cycles per insertion: %llu\n", cycles_per_insertion);
+
+ rte_free(tbl_multiwriter_test_params.found);
+ rte_free(tbl_multiwriter_test_params.keys);
+ rte_hash_free(handle);
+ return 0;
+
+err3:
+ rte_free(tbl_multiwriter_test_params.found);
+err2:
+ rte_free(tbl_multiwriter_test_params.keys);
+err1:
+ rte_hash_free(handle);
+ return -1;
+}
+
+static int
+test_hash_multiwriter_main(void)
+{
+ int r = -1;
+
+ if (rte_lcore_count() == 1) {
+ printf("More than one lcore is required to do multiwriter test\n");
+ return 0;
+ }
+
+ setlocale(LC_NUMERIC, "");
+
+ if (!rte_tm_supported()) {
+ printf("Hardware transactional memory (lock elision) "
+ "is NOT supported\n");
+ } else {
+ printf("Hardware transactional memory (lock elision) "
+ "is supported\n");
+
+ printf("Test multi-writer with Hardware transactional memory\n");
+
+ use_htm = 1;
+ r = test_hash_multiwriter();
+ }
+
+ printf("Test multi-writer without Hardware transactional memory\n");
+ use_htm = 0;
+ r = test_hash_multiwriter();
+
+ return r;
+}
+
+
+static struct test_command hash_scaling_cmd = {
+ .command = "hash_multiwriter_autotest",
+ .callback = test_hash_multiwriter_main,
+};
+
+REGISTER_TEST_COMMAND(hash_scaling_cmd);
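+
+/*
+ * Example invocation (build path and EAL flags are illustrative and
+ * depend on the local setup):
+ *
+ *   ./build/app/test -c f -n 4
+ *   RTE>> hash_multiwriter_autotest
+ */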
diff --git a/app/test/test_interrupts.c b/app/test/test_interrupts.c
index 6e3dec31..df6d2618 100644
--- a/app/test/test_interrupts.c
+++ b/app/test/test_interrupts.c
@@ -41,7 +41,7 @@
#include "test.h"
-#define TEST_INTERRUPT_CHECK_INTERVAL 1000 /* ms */
+#define TEST_INTERRUPT_CHECK_INTERVAL 100 /* ms */
/* predefined interrupt handle types */
enum test_interrupt_handle_type {
@@ -372,7 +372,7 @@ test_interrupt_full_path_check(enum test_interrupt_handle_type intr_type)
if (test_interrupt_trigger_interrupt() < 0)
return -1;
- /* check flag in 3 seconds */
+ /* check the flag for up to 3 check intervals (300 ms) */
for (count = 0; flag == 0 && count < 3; count++)
rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 7cbc2891..eeb13954 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -83,7 +83,7 @@
#define MAX_PKT_BURST (512)
#define DEF_PKT_BURST (16)
-#define BONDED_DEV_NAME ("unit_test_bonded_device")
+#define BONDED_DEV_NAME ("unit_test_bond_dev")
#define INVALID_SOCKET_ID (-1)
#define INVALID_PORT_ID (-1)
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index 31640cd6..a3f1f088 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -151,6 +151,8 @@ static struct rte_eth_conf default_pmd_conf = {
.lpbk_mode = 0,
};
+static uint8_t lacpdu_rx_count[RTE_MAX_ETHPORTS] = {0, };
+
#define FOR_EACH(_i, _item, _array, _size) \
for (_i = 0, _item = &_array[0]; _i < _size && (_item = &_array[_i]); _i++)
@@ -320,8 +322,26 @@ remove_slave(struct slave_conf *slave)
return 0;
}
+static void
+lacp_recv_cb(uint8_t slave_id, struct rte_mbuf *lacp_pkt)
+{
+ struct ether_hdr *hdr;
+ struct slow_protocol_frame *slow_hdr;
+
+ RTE_VERIFY(lacp_pkt != NULL);
+
+ hdr = rte_pktmbuf_mtod(lacp_pkt, struct ether_hdr *);
+ RTE_VERIFY(hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_SLOW));
+
+ slow_hdr = rte_pktmbuf_mtod(lacp_pkt, struct slow_protocol_frame *);
+ RTE_VERIFY(slow_hdr->slow_protocol.subtype == SLOW_SUBTYPE_LACP);
+
+ lacpdu_rx_count[slave_id]++;
+ rte_pktmbuf_free(lacp_pkt);
+}
+
static int
-initialize_bonded_device_with_slaves(uint8_t slave_count, uint8_t start)
+initialize_bonded_device_with_slaves(uint8_t slave_count, uint8_t external_sm)
{
uint8_t i;
@@ -337,9 +357,17 @@ initialize_bonded_device_with_slaves(uint8_t slave_count, uint8_t start)
rte_eth_bond_8023ad_setup(test_params.bonded_port_id, NULL);
rte_eth_promiscuous_disable(test_params.bonded_port_id);
- if (start)
- TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params.bonded_port_id),
- "Failed to start bonded device");
+ if (external_sm) {
+ struct rte_eth_bond_8023ad_conf conf;
+
+ rte_eth_bond_8023ad_conf_get(test_params.bonded_port_id, &conf);
+ conf.slowrx_cb = lacp_recv_cb;
+ rte_eth_bond_8023ad_setup(test_params.bonded_port_id, &conf);
+ }
+
+ TEST_ASSERT_SUCCESS(rte_eth_dev_start(test_params.bonded_port_id),
+ "Failed to start bonded device");
return TEST_SUCCESS;
}
@@ -640,7 +668,7 @@ test_mode4_lacp(void)
{
int retval;
- retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 1);
+ retval = initialize_bonded_device_with_slaves(TEST_LACP_SLAVE_COUT, 0);
TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
/* Test LACP handshake function */
@@ -738,7 +766,8 @@ test_mode4_rx(void)
struct ether_addr dst_mac;
struct ether_addr bonded_mac;
- retval = initialize_bonded_device_with_slaves(TEST_PROMISC_SLAVE_COUNT, 1);
+ retval = initialize_bonded_device_with_slaves(TEST_PROMISC_SLAVE_COUNT,
+ 0);
TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
retval = bond_handshake();
@@ -915,7 +944,7 @@ test_mode4_tx_burst(void)
struct ether_addr dst_mac = { { 0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 } };
struct ether_addr bonded_mac;
- retval = initialize_bonded_device_with_slaves(TEST_TX_SLAVE_COUNT, 1);
+ retval = initialize_bonded_device_with_slaves(TEST_TX_SLAVE_COUNT, 0);
TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
retval = bond_handshake();
@@ -1099,7 +1128,8 @@ test_mode4_marker(void)
uint8_t i, j;
const uint16_t ethtype_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
- retval = initialize_bonded_device_with_slaves(TEST_MARKER_SLAVE_COUT, 1);
+ retval = initialize_bonded_device_with_slaves(TEST_MARKER_SLAVE_COUT,
+ 0);
TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
/* Test LACP handshake function */
@@ -1184,7 +1214,8 @@ test_mode4_expired(void)
struct rte_eth_bond_8023ad_conf conf;
- retval = initialize_bonded_device_with_slaves(TEST_EXPIRED_SLAVE_COUNT, 1);
+ retval = initialize_bonded_device_with_slaves(TEST_EXPIRED_SLAVE_COUNT,
+ 0);
/* Set custom timeouts to make test last shorter. */
rte_eth_bond_8023ad_conf_get(test_params.bonded_port_id, &conf);
conf.fast_periodic_ms = 100;
@@ -1266,6 +1297,156 @@ test_mode4_expired(void)
}
static int
+test_mode4_ext_ctrl(void)
+{
+ /*
+ * configure bonded interface without the external sm enabled
+ * . try to transmit lacpdu (should fail)
+ * . try to set collecting and distributing flags (should fail)
+ * reconfigure w/external sm
+ * . transmit one lacpdu on each slave using new api
+ * . make sure each slave receives one lacpdu using the callback api
+ * . transmit one data pdu on each slave (should fail)
+ * . enable distribution and collection, send one data pdu each again
+ */
+
+ int retval;
+ struct slave_conf *slave = NULL;
+ uint8_t i;
+
+ struct rte_mbuf *lacp_tx_buf[SLAVE_COUNT];
+ struct ether_addr src_mac, dst_mac;
+ struct lacpdu_header lacpdu = {
+ .lacpdu = {
+ .subtype = SLOW_SUBTYPE_LACP,
+ },
+ };
+
+ ether_addr_copy(&parnter_system, &src_mac);
+ ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);
+
+ initialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,
+ ETHER_TYPE_SLOW, 0, 0);
+
+ for (i = 0; i < SLAVE_COUNT; i++) {
+ lacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool);
+ rte_memcpy(rte_pktmbuf_mtod(lacp_tx_buf[i], char *),
+ &lacpdu, sizeof(lacpdu));
+ rte_pktmbuf_pkt_len(lacp_tx_buf[i]) = sizeof(lacpdu);
+ }
+
+ retval = initialize_bonded_device_with_slaves(TEST_TX_SLAVE_COUNT, 0);
+ TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
+
+ FOR_EACH_SLAVE(i, slave) {
+ TEST_ASSERT_FAIL(rte_eth_bond_8023ad_ext_slowtx(
+ test_params.bonded_port_id,
+ slave->port_id, lacp_tx_buf[i]),
+ "Slave should not allow manual LACP xmit");
+ TEST_ASSERT_FAIL(rte_eth_bond_8023ad_ext_collect(
+ test_params.bonded_port_id,
+ slave->port_id, 1),
+ "Slave should not allow external state controls");
+ }
+
+ free_pkts(lacp_tx_buf, RTE_DIM(lacp_tx_buf));
+
+ retval = remove_slaves_and_stop_bonded_device();
+ TEST_ASSERT_SUCCESS(retval, "Bonded device cleanup failed.");
+
+ return TEST_SUCCESS;
+}
+
+
+static int
+test_mode4_ext_lacp(void)
+{
+ int retval;
+ struct slave_conf *slave = NULL;
+ uint8_t all_slaves_done = 0, i;
+ uint16_t nb_pkts;
+ const unsigned int delay = bond_get_update_timeout_ms();
+
+ struct rte_mbuf *lacp_tx_buf[SLAVE_COUNT];
+ struct rte_mbuf *buf[SLAVE_COUNT];
+ struct ether_addr src_mac, dst_mac;
+ struct lacpdu_header lacpdu = {
+ .lacpdu = {
+ .subtype = SLOW_SUBTYPE_LACP,
+ },
+ };
+
+ ether_addr_copy(&parnter_system, &src_mac);
+ ether_addr_copy(&slow_protocol_mac_addr, &dst_mac);
+
+ initialize_eth_header(&lacpdu.eth_hdr, &src_mac, &dst_mac,
+ ETHER_TYPE_SLOW, 0, 0);
+
+ for (i = 0; i < SLAVE_COUNT; i++) {
+ lacp_tx_buf[i] = rte_pktmbuf_alloc(test_params.mbuf_pool);
+ rte_memcpy(rte_pktmbuf_mtod(lacp_tx_buf[i], char *),
+ &lacpdu, sizeof(lacpdu));
+ rte_pktmbuf_pkt_len(lacp_tx_buf[i]) = sizeof(lacpdu);
+ }
+
+ retval = initialize_bonded_device_with_slaves(TEST_TX_SLAVE_COUNT, 1);
+ TEST_ASSERT_SUCCESS(retval, "Failed to initialize bonded device");
+
+ memset(lacpdu_rx_count, 0, sizeof(lacpdu_rx_count));
+
+ /* Wait for new settings to be applied. */
+ for (i = 0; i < 30; ++i)
+ rte_delay_ms(delay);
+
+ FOR_EACH_SLAVE(i, slave) {
+ retval = rte_eth_bond_8023ad_ext_slowtx(
+ test_params.bonded_port_id,
+ slave->port_id, lacp_tx_buf[i]);
+ TEST_ASSERT_SUCCESS(retval,
+ "Slave should allow manual LACP xmit");
+ }
+
+ nb_pkts = bond_tx(NULL, 0);
+ TEST_ASSERT_EQUAL(nb_pkts, 0, "Packets transmitted unexpectedly");
+
+ FOR_EACH_SLAVE(i, slave) {
+ nb_pkts = slave_get_pkts(slave, buf, RTE_DIM(buf));
+ TEST_ASSERT_EQUAL(nb_pkts, 1, "found %u packets on slave %d\n",
+ nb_pkts, i);
+ slave_put_pkts(slave, buf, nb_pkts);
+ }
+
+ nb_pkts = bond_rx(buf, RTE_DIM(buf));
+ free_pkts(buf, nb_pkts);
+ TEST_ASSERT_EQUAL(nb_pkts, 0, "Packets received unexpectedly");
+
+ /* wait for the periodic callback to run */
+ for (i = 0; i < 30 && all_slaves_done == 0; ++i) {
+ uint8_t s, total = 0;
+
+ rte_delay_ms(delay);
+ FOR_EACH_SLAVE(s, slave) {
+ total += lacpdu_rx_count[slave->port_id];
+ }
+
+ if (total >= SLAVE_COUNT)
+ all_slaves_done = 1;
+ }
+
+ FOR_EACH_SLAVE(i, slave) {
+ TEST_ASSERT_EQUAL(lacpdu_rx_count[slave->port_id], 1,
+ "Slave port %u should have received 1 lacpdu (count=%u)",
+ slave->port_id,
+ lacpdu_rx_count[slave->port_id]);
+ }
+
+ retval = remove_slaves_and_stop_bonded_device();
+ TEST_ASSERT_SUCCESS(retval, "Test cleanup failed.");
+
+ return TEST_SUCCESS;
+}
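+
+/*
+ * For reference, a minimal sketch (hypothetical application code) of
+ * the external state machine wiring exercised by the two tests above;
+ * it assumes a configured bonded port `port_id`, a slave `slave_id`,
+ * a prepared LACPDU mbuf `lacp_mbuf` and an application callback
+ * `app_lacp_rx`:
+ *
+ *	struct rte_eth_bond_8023ad_conf conf;
+ *
+ *	rte_eth_bond_8023ad_conf_get(port_id, &conf);
+ *	conf.slowrx_cb = app_lacp_rx;
+ *	rte_eth_bond_8023ad_setup(port_id, &conf);
+ *	rte_eth_dev_start(port_id);
+ *
+ *	rte_eth_bond_8023ad_ext_slowtx(port_id, slave_id, lacp_mbuf);
+ *	rte_eth_bond_8023ad_ext_collect(port_id, slave_id, 1);
+ *	rte_eth_bond_8023ad_ext_distrib(port_id, slave_id, 1);
+ */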
+
+static int
check_environment(void)
{
struct slave_conf *port;
@@ -1381,6 +1562,18 @@ test_mode4_expired_wrapper(void)
return test_mode4_executor(&test_mode4_expired);
}
+static int
+test_mode4_ext_ctrl_wrapper(void)
+{
+ return test_mode4_executor(&test_mode4_ext_ctrl);
+}
+
+static int
+test_mode4_ext_lacp_wrapper(void)
+{
+ return test_mode4_executor(&test_mode4_ext_lacp);
+}
+
static struct unit_test_suite link_bonding_mode4_test_suite = {
.suite_name = "Link Bonding mode 4 Unit Test Suite",
.setup = test_setup,
@@ -1391,6 +1584,10 @@ static struct unit_test_suite link_bonding_mode4_test_suite = {
TEST_CASE_NAMED("test_mode4_tx_burst", test_mode4_tx_burst_wrapper),
TEST_CASE_NAMED("test_mode4_marker", test_mode4_marker_wrapper),
TEST_CASE_NAMED("test_mode4_expired", test_mode4_expired_wrapper),
+ TEST_CASE_NAMED("test_mode4_ext_ctrl",
+ test_mode4_ext_ctrl_wrapper),
+ TEST_CASE_NAMED("test_mode4_ext_lacp",
+ test_mode4_ext_lacp_wrapper),
TEST_CASES_END() /**< NULL terminate unit test array */
}
diff --git a/app/test/test_logs.c b/app/test/test_logs.c
index 18a3b6aa..d0a99623 100644
--- a/app/test/test_logs.c
+++ b/app/test/test_logs.c
@@ -65,27 +65,23 @@ test_logs(void)
rte_set_log_type(RTE_LOGTYPE_TESTAPP1, 1);
rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 1);
- /* log in debug level */
- rte_set_log_level(RTE_LOG_DEBUG);
- RTE_LOG(DEBUG, TESTAPP1, "this is a debug level message\n");
- RTE_LOG(INFO, TESTAPP1, "this is a info level message\n");
- RTE_LOG(WARNING, TESTAPP1, "this is a warning level message\n");
+ /* log in error level */
+ rte_set_log_level(RTE_LOG_ERR);
+ RTE_LOG(ERR, TESTAPP1, "error message\n");
+ RTE_LOG(CRIT, TESTAPP1, "critical message\n");
- /* log in info level */
- rte_set_log_level(RTE_LOG_INFO);
- RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n");
- RTE_LOG(INFO, TESTAPP2, "this is a info level message\n");
- RTE_LOG(WARNING, TESTAPP2, "this is a warning level message\n");
+ /* log in critical level */
+ rte_set_log_level(RTE_LOG_CRIT);
+ RTE_LOG(ERR, TESTAPP2, "error message (not displayed)\n");
+ RTE_LOG(CRIT, TESTAPP2, "critical message\n");
/* disable one log type */
rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 0);
- /* log in debug level */
- rte_set_log_level(RTE_LOG_DEBUG);
- RTE_LOG(DEBUG, TESTAPP1, "this is a debug level message\n");
- RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n");
-
- rte_log_dump_history(stdout);
+ /* log in error level */
+ rte_set_log_level(RTE_LOG_ERR);
+ RTE_LOG(ERR, TESTAPP1, "error message\n");
+ RTE_LOG(ERR, TESTAPP2, "error message (not displayed)\n");
return 0;
}
diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c
index 40fbbc65..f6930fbd 100644
--- a/app/test/test_lpm.c
+++ b/app/test/test_lpm.c
@@ -34,20 +34,10 @@
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_memory.h>
-#include <rte_random.h>
-#include <rte_branch_prediction.h>
-#include <rte_ip.h>
-#include <time.h>
+#include <rte_lpm.h>
#include "test.h"
-
-#include "rte_lpm.h"
#include "test_lpm_routes.h"
#include "test_xmmt_ops.h"
@@ -78,7 +68,6 @@ static int32_t test14(void);
static int32_t test15(void);
static int32_t test16(void);
static int32_t test17(void);
-static int32_t perf_test(void);
rte_lpm_test tests[] = {
/* Test Cases */
@@ -100,7 +89,6 @@ rte_lpm_test tests[] = {
test15,
test16,
test17,
- perf_test,
};
#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
@@ -1230,202 +1218,7 @@ test17(void)
}
/*
- * Lookup performance test
- */
-
-#define ITERATIONS (1 << 10)
-#define BATCH_SIZE (1 << 12)
-#define BULK_SIZE 32
-
-static void
-print_route_distribution(const struct route_rule *table, uint32_t n)
-{
- unsigned i, j;
-
- printf("Route distribution per prefix width: \n");
- printf("DEPTH QUANTITY (PERCENT)\n");
- printf("--------------------------- \n");
-
- /* Count depths. */
- for (i = 1; i <= 32; i++) {
- unsigned depth_counter = 0;
- double percent_hits;
-
- for (j = 0; j < n; j++)
- if (table[j].depth == (uint8_t) i)
- depth_counter++;
-
- percent_hits = ((double)depth_counter)/((double)n) * 100;
- printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
- }
- printf("\n");
-}
-
-int32_t
-perf_test(void)
-{
- struct rte_lpm *lpm = NULL;
- struct rte_lpm_config config;
-
- config.max_rules = 1000000;
- config.number_tbl8s = NUMBER_TBL8S;
- config.flags = 0;
- uint64_t begin, total_time, lpm_used_entries = 0;
- unsigned i, j;
- uint32_t next_hop_add = 0xAA, next_hop_return = 0;
- int status = 0;
- uint64_t cache_line_counter = 0;
- int64_t count = 0;
-
- rte_srand(rte_rdtsc());
-
- printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
-
- print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
-
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
- TEST_LPM_ASSERT(lpm != NULL);
-
- /* Measue add. */
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- if (rte_lpm_add(lpm, large_route_table[i].ip,
- large_route_table[i].depth, next_hop_add) == 0)
- status++;
- }
- /* End Timer. */
- total_time = rte_rdtsc() - begin;
-
- printf("Unique added entries = %d\n", status);
- /* Obtain add statistics. */
- for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
- if (lpm->tbl24[i].valid)
- lpm_used_entries++;
-
- if (i % 32 == 0) {
- if ((uint64_t)count < lpm_used_entries) {
- cache_line_counter++;
- count = lpm_used_entries;
- }
- }
- }
-
- printf("Used table 24 entries = %u (%g%%)\n",
- (unsigned) lpm_used_entries,
- (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
- printf("64 byte Cache entries used = %u (%u bytes)\n",
- (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
-
- printf("Average LPM Add: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
-
- /* Measure single Lookup */
- total_time = 0;
- count = 0;
-
- for (i = 0; i < ITERATIONS; i++) {
- static uint32_t ip_batch[BATCH_SIZE];
-
- for (j = 0; j < BATCH_SIZE; j++)
- ip_batch[j] = rte_rand();
-
- /* Lookup per batch */
- begin = rte_rdtsc();
-
- for (j = 0; j < BATCH_SIZE; j++) {
- if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
- count++;
- }
-
- total_time += rte_rdtsc() - begin;
-
- }
- printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Measure bulk Lookup */
- total_time = 0;
- count = 0;
- for (i = 0; i < ITERATIONS; i++) {
- static uint32_t ip_batch[BATCH_SIZE];
- uint32_t next_hops[BULK_SIZE];
-
- /* Create array of random IP addresses */
- for (j = 0; j < BATCH_SIZE; j++)
- ip_batch[j] = rte_rand();
-
- /* Lookup per batch */
- begin = rte_rdtsc();
- for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
- unsigned k;
- rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
- for (k = 0; k < BULK_SIZE; k++)
- if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
- count++;
- }
-
- total_time += rte_rdtsc() - begin;
- }
- printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Measure LookupX4 */
- total_time = 0;
- count = 0;
- for (i = 0; i < ITERATIONS; i++) {
- static uint32_t ip_batch[BATCH_SIZE];
- uint32_t next_hops[4];
-
- /* Create array of random IP addresses */
- for (j = 0; j < BATCH_SIZE; j++)
- ip_batch[j] = rte_rand();
-
- /* Lookup per batch */
- begin = rte_rdtsc();
- for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
- unsigned k;
- xmm_t ipx4;
-
- ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
- ipx4 = *(xmm_t *)(ip_batch + j);
- rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
- for (k = 0; k < RTE_DIM(next_hops); k++)
- if (unlikely(next_hops[k] == UINT32_MAX))
- count++;
- }
-
- total_time += rte_rdtsc() - begin;
- }
- printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Delete */
- status = 0;
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- /* rte_lpm_delete(lpm, ip, depth) */
- status += rte_lpm_delete(lpm, large_route_table[i].ip,
- large_route_table[i].depth);
- }
-
- total_time += rte_rdtsc() - begin;
-
- printf("Average LPM Delete: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
-
- rte_lpm_delete_all(lpm);
- rte_lpm_free(lpm);
-
- return PASS;
-}
-
-/*
- * Do all unit and performance tests.
+ * Do all unit tests.
*/
static int
diff --git a/app/test/test_lpm6.c b/app/test/test_lpm6.c
index b464342c..458a10bf 100644
--- a/app/test/test_lpm6.c
+++ b/app/test/test_lpm6.c
@@ -30,25 +30,16 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <time.h>
-#include "test.h"
-
-#include <rte_common.h>
-#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_random.h>
-#include <rte_branch_prediction.h>
-#include <rte_ip.h>
+#include <rte_lpm6.h>
-#include "rte_lpm6.h"
+#include "test.h"
#include "test_lpm6_routes.h"
#define TEST_LPM_ASSERT(cond) do { \
@@ -88,7 +79,6 @@ static int32_t test24(void);
static int32_t test25(void);
static int32_t test26(void);
static int32_t test27(void);
-static int32_t perf_test(void);
rte_lpm6_test tests6[] = {
/* Test Cases */
@@ -120,12 +110,9 @@ rte_lpm6_test tests6[] = {
test25,
test26,
test27,
- perf_test,
};
#define NUM_LPM6_TESTS (sizeof(tests6)/sizeof(tests6[0]))
-#define RTE_LPM6_TBL24_NUM_ENTRIES (1 << 24)
-#define RTE_LPM6_LOOKUP_SUCCESS 0x04000000
#define MAX_DEPTH 128
#define MAX_RULES 1000000
#define NUMBER_TBL8S (1 << 16)
@@ -231,7 +218,7 @@ test1(void)
}
/*
- * Create lpm table then delete lpm table 100 times
+ * Create lpm table then delete lpm table 20 times
* Use a slightly different rules size each time
*/
int32_t
@@ -245,7 +232,7 @@ test2(void)
config.flags = 0;
/* rte_lpm6_free: Free NULL */
- for (i = 0; i < 100; i++) {
+ for (i = 0; i < 20; i++) {
config.max_rules = MAX_RULES - i;
lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
@@ -704,7 +691,7 @@ test13(void)
}
/*
- * Add 2^16 routes with different first 16 bits and depth 25.
+ * Add 256 routes with different first 8 bits and depth 25.
* Add one more route with the same depth and check that results in a failure.
* After that delete the last rule and create the one that was attempted to be
* created. This checks tbl8 exhaustion.
@@ -717,10 +704,10 @@ test14(void)
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
uint8_t depth = 25, next_hop_add = 100;
int32_t status = 0;
- int i, j;
+ int i;
config.max_rules = MAX_RULES;
- config.number_tbl8s = NUMBER_TBL8S;
+ config.number_tbl8s = 256;
config.flags = 0;
lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
@@ -728,28 +715,22 @@ test14(void)
for (i = 0; i < 256; i++) {
ip[0] = (uint8_t)i;
- for (j = 0; j < 256; j++) {
- ip[1] = (uint8_t)j;
- status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
- TEST_LPM_ASSERT(status == 0);
- }
+ status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
}
ip[0] = 255;
- ip[1] = 255;
- ip[2] = 1;
+ ip[1] = 1;
status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
TEST_LPM_ASSERT(status == -ENOSPC);
ip[0] = 255;
- ip[1] = 255;
- ip[2] = 0;
+ ip[1] = 0;
status = rte_lpm6_delete(lpm, ip, depth);
TEST_LPM_ASSERT(status == 0);
ip[0] = 255;
- ip[1] = 255;
- ip[2] = 1;
+ ip[1] = 1;
status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
TEST_LPM_ASSERT(status == 0);
@@ -858,7 +839,7 @@ test17(void)
TEST_LPM_ASSERT(lpm != NULL);
/* Loop with rte_lpm6_add. */
- for (depth = 1; depth <= 128; depth++) {
+ for (depth = 1; depth <= 16; depth++) {
/* Let the next_hop_add value = depth. Just for change. */
next_hop_add = depth;
@@ -875,7 +856,7 @@ test17(void)
}
/* Loop with rte_lpm6_delete. */
- for (depth = 128; depth >= 1; depth--) {
+ for (depth = 16; depth >= 1; depth--) {
next_hop_add = (uint8_t) (depth - 1);
status = rte_lpm6_delete(lpm, ip2, depth);
@@ -1504,7 +1485,7 @@ test22(void)
/*
* Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
- * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
+ * lookup (miss) in a for loop of 30 times. This will check tbl8 extension
* and contraction.
*/
int32_t
@@ -1528,7 +1509,7 @@ test23(void)
depth = 128;
next_hop_add = 100;
- for (i = 0; i < 1000; i++) {
+ for (i = 0; i < 30; i++) {
status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
TEST_LPM_ASSERT(status == 0);
@@ -1762,143 +1743,7 @@ test27(void)
}
/*
- * Lookup performance test
- */
-
-#define ITERATIONS (1 << 10)
-#define BATCH_SIZE 100000
-
-static void
-print_route_distribution(const struct rules_tbl_entry *table, uint32_t n)
-{
- unsigned i, j;
-
- printf("Route distribution per prefix width: \n");
- printf("DEPTH QUANTITY (PERCENT)\n");
- printf("--------------------------- \n");
-
- /* Count depths. */
- for(i = 1; i <= 128; i++) {
- unsigned depth_counter = 0;
- double percent_hits;
-
- for (j = 0; j < n; j++)
- if (table[j].depth == (uint8_t) i)
- depth_counter++;
-
- percent_hits = ((double)depth_counter)/((double)n) * 100;
- printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
- }
- printf("\n");
-}
-
-int32_t
-perf_test(void)
-{
- struct rte_lpm6 *lpm = NULL;
- struct rte_lpm6_config config;
- uint64_t begin, total_time;
- unsigned i, j;
- uint8_t next_hop_add = 0xAA, next_hop_return = 0;
- int status = 0;
- int64_t count = 0;
-
- config.max_rules = 1000000;
- config.number_tbl8s = NUMBER_TBL8S;
- config.flags = 0;
-
- rte_srand(rte_rdtsc());
-
- printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
-
- print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
-
- lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
- TEST_LPM_ASSERT(lpm != NULL);
-
- /* Measure add. */
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- if (rte_lpm6_add(lpm, large_route_table[i].ip,
- large_route_table[i].depth, next_hop_add) == 0)
- status++;
- }
- /* End Timer. */
- total_time = rte_rdtsc() - begin;
-
- printf("Unique added entries = %d\n", status);
- printf("Average LPM Add: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
-
- /* Measure single Lookup */
- total_time = 0;
- count = 0;
-
- for (i = 0; i < ITERATIONS; i ++) {
- begin = rte_rdtsc();
-
- for (j = 0; j < NUM_IPS_ENTRIES; j ++) {
- if (rte_lpm6_lookup(lpm, large_ips_table[j].ip,
- &next_hop_return) != 0)
- count++;
- }
-
- total_time += rte_rdtsc() - begin;
-
- }
- printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Measure bulk Lookup */
- total_time = 0;
- count = 0;
-
- uint8_t ip_batch[NUM_IPS_ENTRIES][16];
- int16_t next_hops[NUM_IPS_ENTRIES];
-
- for (i = 0; i < NUM_IPS_ENTRIES; i++)
- memcpy(ip_batch[i], large_ips_table[i].ip, 16);
-
- for (i = 0; i < ITERATIONS; i ++) {
-
- /* Lookup per batch */
- begin = rte_rdtsc();
- rte_lpm6_lookup_bulk_func(lpm, ip_batch, next_hops, NUM_IPS_ENTRIES);
- total_time += rte_rdtsc() - begin;
-
- for (j = 0; j < NUM_IPS_ENTRIES; j++)
- if (next_hops[j] < 0)
- count++;
- }
- printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Delete */
- status = 0;
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- /* rte_lpm_delete(lpm, ip, depth) */
- status += rte_lpm6_delete(lpm, large_route_table[i].ip,
- large_route_table[i].depth);
- }
-
- total_time += rte_rdtsc() - begin;
-
- printf("Average LPM Delete: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
-
- rte_lpm6_delete_all(lpm);
- rte_lpm6_free(lpm);
-
- return PASS;
-}
-
-/*
- * Do all unit and performance tests.
+ * Do all unit tests.
*/
static int
test_lpm6(void)
@@ -1907,6 +1752,7 @@ test_lpm6(void)
int status = -1, global_status = 0;
for (i = 0; i < NUM_LPM6_TESTS; i++) {
+ printf("# test %02d\n", i);
status = tests6[i]();
if (status < 0) {
diff --git a/app/test/test_lpm6_perf.c b/app/test/test_lpm6_perf.c
new file mode 100644
index 00000000..b7d46314
--- /dev/null
+++ b/app/test/test_lpm6_perf.c
@@ -0,0 +1,191 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_memory.h>
+#include <rte_lpm6.h>
+
+#include "test.h"
+#include "test_lpm6_routes.h"
+
+#define TEST_LPM_ASSERT(cond) do { \
+ if (!(cond)) { \
+ printf("Error at line %d: \n", __LINE__); \
+ return -1; \
+ } \
+} while(0)
+
+#define ITERATIONS (1 << 10)
+#define NUMBER_TBL8S (1 << 16)
+
+static void
+print_route_distribution(const struct rules_tbl_entry *table, uint32_t n)
+{
+ unsigned i, j;
+
+ printf("Route distribution per prefix width: \n");
+ printf("DEPTH QUANTITY (PERCENT)\n");
+ printf("--------------------------- \n");
+
+ /* Count depths. */
+ for (i = 1; i <= 128; i++) {
+ unsigned depth_counter = 0;
+ double percent_hits;
+
+ for (j = 0; j < n; j++)
+ if (table[j].depth == (uint8_t) i)
+ depth_counter++;
+
+ percent_hits = ((double)depth_counter)/((double)n) * 100;
+ printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
+ }
+ printf("\n");
+}
+
+static int
+test_lpm6_perf(void)
+{
+ struct rte_lpm6 *lpm = NULL;
+ struct rte_lpm6_config config;
+ uint64_t begin, total_time;
+ unsigned i, j;
+ uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+ int status = 0;
+ int64_t count = 0;
+
+ config.max_rules = 1000000;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
+
+ rte_srand(rte_rdtsc());
+
+ printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
+
+ print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
+
+ lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ /* Measure add. */
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ if (rte_lpm6_add(lpm, large_route_table[i].ip,
+ large_route_table[i].depth, next_hop_add) == 0)
+ status++;
+ }
+ /* End Timer. */
+ total_time = rte_rdtsc() - begin;
+
+ printf("Unique added entries = %d\n", status);
+ printf("Average LPM Add: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ /* Measure single Lookup */
+ total_time = 0;
+ count = 0;
+
+ for (i = 0; i < ITERATIONS; i++) {
+ begin = rte_rdtsc();
+
+ for (j = 0; j < NUM_IPS_ENTRIES; j++) {
+ if (rte_lpm6_lookup(lpm, large_ips_table[j].ip,
+ &next_hop_return) != 0)
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+
+ }
+ printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * NUM_IPS_ENTRIES),
+ (count * 100.0) / (double)(ITERATIONS * NUM_IPS_ENTRIES));
+
+ /* Measure bulk Lookup */
+ total_time = 0;
+ count = 0;
+
+ uint8_t ip_batch[NUM_IPS_ENTRIES][16];
+ int16_t next_hops[NUM_IPS_ENTRIES];
+
+ for (i = 0; i < NUM_IPS_ENTRIES; i++)
+ memcpy(ip_batch[i], large_ips_table[i].ip, 16);
+
+ for (i = 0; i < ITERATIONS; i++) {
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+ rte_lpm6_lookup_bulk_func(lpm, ip_batch, next_hops, NUM_IPS_ENTRIES);
+ total_time += rte_rdtsc() - begin;
+
+ for (j = 0; j < NUM_IPS_ENTRIES; j++)
+ if (next_hops[j] < 0)
+ count++;
+ }
+ printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * NUM_IPS_ENTRIES),
+ (count * 100.0) / (double)(ITERATIONS * NUM_IPS_ENTRIES));
+
+ /* Delete */
+ status = 0;
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ /* rte_lpm6_delete(lpm, ip, depth) */
+ status += rte_lpm6_delete(lpm, large_route_table[i].ip,
+ large_route_table[i].depth);
+ }
+
+ total_time = rte_rdtsc() - begin;
+
+ printf("Average LPM Delete: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ rte_lpm6_delete_all(lpm);
+ rte_lpm6_free(lpm);
+
+ return 0;
+}
+
+static struct test_command lpm6_perf_cmd = {
+ .command = "lpm6_perf_autotest",
+ .callback = test_lpm6_perf,
+};
+REGISTER_TEST_COMMAND(lpm6_perf_cmd);
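+
+/*
+ * The measurements above were previously part of lpm6_autotest; they
+ * now run only on request from the test application prompt, e.g.:
+ *
+ *   RTE>> lpm6_perf_autotest
+ */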
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
new file mode 100644
index 00000000..41da811c
--- /dev/null
+++ b/app/test/test_lpm_perf.c
@@ -0,0 +1,249 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_branch_prediction.h>
+#include <rte_lpm.h>
+
+#include "test.h"
+#include "test_lpm_routes.h"
+#include "test_xmmt_ops.h"
+
+#define TEST_LPM_ASSERT(cond) do { \
+ if (!(cond)) { \
+ printf("Error at line %d: \n", __LINE__); \
+ return -1; \
+ } \
+} while(0)
+
+#define ITERATIONS (1 << 10)
+#define BATCH_SIZE (1 << 12)
+#define BULK_SIZE 32
+
+static void
+print_route_distribution(const struct route_rule *table, uint32_t n)
+{
+ unsigned i, j;
+
+ printf("Route distribution per prefix width: \n");
+ printf("DEPTH QUANTITY (PERCENT)\n");
+ printf("--------------------------- \n");
+
+ /* Count depths. */
+ for (i = 1; i <= 32; i++) {
+ unsigned depth_counter = 0;
+ double percent_hits;
+
+ for (j = 0; j < n; j++)
+ if (table[j].depth == (uint8_t) i)
+ depth_counter++;
+
+ percent_hits = ((double)depth_counter)/((double)n) * 100;
+ printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
+ }
+ printf("\n");
+}
+
+static int
+test_lpm_perf(void)
+{
+ struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = 1000000;
+ config.number_tbl8s = 256;
+ config.flags = 0;
+ uint64_t begin, total_time, lpm_used_entries = 0;
+ unsigned i, j;
+ uint32_t next_hop_add = 0xAA, next_hop_return = 0;
+ int status = 0;
+ uint64_t cache_line_counter = 0;
+ int64_t count = 0;
+
+ rte_srand(rte_rdtsc());
+
+ printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
+
+ print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ /* Measure add. */
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ if (rte_lpm_add(lpm, large_route_table[i].ip,
+ large_route_table[i].depth, next_hop_add) == 0)
+ status++;
+ }
+ /* End Timer. */
+ total_time = rte_rdtsc() - begin;
+
+ printf("Unique added entries = %d\n", status);
+ /* Obtain add statistics. */
+ for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
+ if (lpm->tbl24[i].valid)
+ lpm_used_entries++;
+
+ if (i % 32 == 0) {
+ if ((uint64_t)count < lpm_used_entries) {
+ cache_line_counter++;
+ count = lpm_used_entries;
+ }
+ }
+ }
+
+ printf("Used table 24 entries = %u (%g%%)\n",
+ (unsigned) lpm_used_entries,
+ (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
+ printf("64 byte Cache entries used = %u (%u bytes)\n",
+ (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
+
+ printf("Average LPM Add: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ /* Measure single Lookup */
+ total_time = 0;
+ count = 0;
+
+ for (i = 0; i < ITERATIONS; i++) {
+ static uint32_t ip_batch[BATCH_SIZE];
+
+ for (j = 0; j < BATCH_SIZE; j++)
+ ip_batch[j] = rte_rand();
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+
+ for (j = 0; j < BATCH_SIZE; j++) {
+ if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+
+ }
+ printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Measure bulk Lookup */
+ total_time = 0;
+ count = 0;
+ for (i = 0; i < ITERATIONS; i++) {
+ static uint32_t ip_batch[BATCH_SIZE];
+ uint32_t next_hops[BULK_SIZE];
+
+ /* Create array of random IP addresses */
+ for (j = 0; j < BATCH_SIZE; j++)
+ ip_batch[j] = rte_rand();
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+ for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
+ unsigned k;
+ rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
+ for (k = 0; k < BULK_SIZE; k++)
+ if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+ }
+ printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Measure LookupX4 */
+ total_time = 0;
+ count = 0;
+ for (i = 0; i < ITERATIONS; i++) {
+ static uint32_t ip_batch[BATCH_SIZE];
+ uint32_t next_hops[4];
+
+ /* Create array of random IP addresses */
+ for (j = 0; j < BATCH_SIZE; j++)
+ ip_batch[j] = rte_rand();
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+ for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
+ unsigned k;
+ xmm_t ipx4;
+
+ ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
+ rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
+ for (k = 0; k < RTE_DIM(next_hops); k++)
+ if (unlikely(next_hops[k] == UINT32_MAX))
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+ }
+ printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Delete */
+ status = 0;
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ /* rte_lpm_delete(lpm, ip, depth) */
+ status += rte_lpm_delete(lpm, large_route_table[i].ip,
+ large_route_table[i].depth);
+ }
+
+ total_time = rte_rdtsc() - begin;
+
+ printf("Average LPM Delete: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ rte_lpm_delete_all(lpm);
+ rte_lpm_free(lpm);
+
+ return 0;
+}
+
+static struct test_command lpm_perf_cmd = {
+ .command = "lpm_perf_autotest",
+ .callback = test_lpm_perf,
+};
+REGISTER_TEST_COMMAND(lpm_perf_cmd);
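+
+/*
+ * Example invocation (illustrative):
+ *
+ *   RTE>> lpm_perf_autotest
+ *
+ * Note on the LookupX4 loop above: UINT32_MAX is passed as the default
+ * next hop, so a returned UINT32_MAX is counted as a lookup miss.
+ */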
diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index 98ff93af..8664885d 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -511,10 +511,14 @@ test_attach_from_different_pool(void)
rte_pktmbuf_detach(clone);
if (c_data != rte_pktmbuf_mtod(clone, char *))
GOTO_FAIL("clone was not detached properly\n");
+ if (rte_mbuf_refcnt_read(m) != 2)
+ GOTO_FAIL("invalid refcnt in m\n");
rte_pktmbuf_detach(clone2);
if (c_data2 != rte_pktmbuf_mtod(clone2, char *))
GOTO_FAIL("clone2 was not detached properly\n");
+ if (rte_mbuf_refcnt_read(m) != 1)
+ GOTO_FAIL("invalid refcnt in m\n");
/* free the clones and the initial mbuf */
rte_pktmbuf_free(clone2);
@@ -713,7 +717,7 @@ test_refcnt_iter(unsigned lcore, unsigned iter)
* - increment it's reference up to N+1,
* - enqueue it N times into the ring for slave cores to free.
*/
- for (i = 0, n = rte_mempool_count(refcnt_pool);
+ for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
i++) {
ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
@@ -741,14 +745,14 @@ test_refcnt_iter(unsigned lcore, unsigned iter)
/* check that all mbufs are back into mempool by now */
for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
- if ((i = rte_mempool_count(refcnt_pool)) == n) {
+ if ((i = rte_mempool_avail_count(refcnt_pool)) == n) {
refcnt_lcore[lcore] += tref;
printf("%s(lcore=%u, iter=%u) completed, "
"%u references processed\n",
__func__, lcore, iter, tref);
return;
}
- rte_delay_ms(1000);
+ rte_delay_ms(100);
}
rte_panic("(lcore=%u, iter=%u): after %us only "
diff --git a/app/test/test_memcpy.c b/app/test/test_memcpy.c
index b2bb4e0e..8195e209 100644
--- a/app/test/test_memcpy.c
+++ b/app/test/test_memcpy.c
@@ -37,10 +37,7 @@
#include <stdlib.h>
#include <rte_common.h>
-#include <rte_cycles.h>
#include <rte_random.h>
-#include <rte_malloc.h>
-
#include <rte_memcpy.h>
#include "test.h"
@@ -65,18 +62,6 @@ static size_t buf_sizes[TEST_VALUE_RANGE];
#define SMALL_BUFFER_SIZE TEST_VALUE_RANGE
#endif /* TEST_VALUE_RANGE == 0 */
-
-/*
- * Arrays of this size are used for measuring uncached memory accesses by
- * picking a random location within the buffer. Make this smaller if there are
- * memory allocation errors.
- */
-#define LARGE_BUFFER_SIZE (100 * 1024 * 1024)
-
-/* How many times to run timing loop for performance tests */
-#define TEST_ITERATIONS 1000000
-#define TEST_BATCH_SIZE 100
-
/* Data is aligned on this many bytes (power of 2) */
#define ALIGNMENT_UNIT 32
diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index f0f823b9..9ea98314 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -71,19 +71,23 @@
* put them back in the pool.
*/
-#define N 65536
-#define TIME_S 5
#define MEMPOOL_ELT_SIZE 2048
-#define MAX_KEEP 128
+#define MAX_KEEP 16
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
-static struct rte_mempool *mp;
-static struct rte_mempool *mp_cache, *mp_nocache;
+#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
+#define RET_ERR() do { \
+ LOG_ERR(); \
+ return -1; \
+ } while (0)
+#define GOTO_ERR(var, label) do { \
+ LOG_ERR(); \
+ var = -1; \
+ goto label; \
+ } while (0)
static rte_atomic32_t synchro;
-
-
/*
* save the object number in the first 4 bytes of object data. All
* other bytes are set to 0.
@@ -93,13 +97,14 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
void *obj, unsigned i)
{
uint32_t *objnum = obj;
+
memset(obj, 0, mp->elt_size);
*objnum = i;
}
/* basic tests (done on one core) */
static int
-test_mempool_basic(void)
+test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
{
uint32_t *objnum;
void **objtable;
@@ -107,48 +112,62 @@ test_mempool_basic(void)
char *obj_data;
int ret = 0;
unsigned i, j;
+ int offset;
+ struct rte_mempool_cache *cache;
+
+ if (use_external_cache) {
+ /* Create a user-owned mempool cache. */
+ cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
+ SOCKET_ID_ANY);
+ if (cache == NULL)
+ RET_ERR();
+ } else {
+ /* May be NULL if cache is disabled. */
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ }
/* dump the mempool status */
rte_mempool_dump(stdout, mp);
printf("get an object\n");
- if (rte_mempool_get(mp, &obj) < 0)
- return -1;
+ if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
+ GOTO_ERR(ret, out);
rte_mempool_dump(stdout, mp);
/* tests that improve coverage */
printf("get object count\n");
- if (rte_mempool_count(mp) != MEMPOOL_SIZE - 1)
- return -1;
+ /* We have to count the extra caches, one in this case. */
+ offset = use_external_cache ? 1 * cache->len : 0;
+ if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)
+ GOTO_ERR(ret, out);
printf("get private data\n");
- if (rte_mempool_get_priv(mp) !=
- (char*) mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num))
- return -1;
+ if (rte_mempool_get_priv(mp) != (char *)mp +
+ MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
+ GOTO_ERR(ret, out);
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
printf("get physical address of an object\n");
- if (MEMPOOL_IS_CONTIG(mp) &&
- rte_mempool_virt2phy(mp, obj) !=
- (phys_addr_t) (mp->phys_addr +
- (phys_addr_t) ((char*) obj - (char*) mp)))
- return -1;
+ if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+ GOTO_ERR(ret, out);
+#endif
printf("put the object back\n");
- rte_mempool_put(mp, obj);
+ rte_mempool_generic_put(mp, &obj, 1, cache, 0);
rte_mempool_dump(stdout, mp);
printf("get 2 objects\n");
- if (rte_mempool_get(mp, &obj) < 0)
- return -1;
- if (rte_mempool_get(mp, &obj2) < 0) {
- rte_mempool_put(mp, obj);
- return -1;
+ if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
+ GOTO_ERR(ret, out);
+ if (rte_mempool_generic_get(mp, &obj2, 1, cache, 0) < 0) {
+ rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+ GOTO_ERR(ret, out);
}
rte_mempool_dump(stdout, mp);
printf("put the objects back\n");
- rte_mempool_put(mp, obj);
- rte_mempool_put(mp, obj2);
+ rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+ rte_mempool_generic_put(mp, &obj2, 1, cache, 0);
rte_mempool_dump(stdout, mp);
/*
@@ -156,12 +175,11 @@ test_mempool_basic(void)
* on other cores may not be empty.
*/
objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
- if (objtable == NULL) {
- return -1;
- }
+ if (objtable == NULL)
+ GOTO_ERR(ret, out);
- for (i=0; i<MEMPOOL_SIZE; i++) {
- if (rte_mempool_get(mp, &objtable[i]) < 0)
+ for (i = 0; i < MEMPOOL_SIZE; i++) {
+ if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 0) < 0)
break;
}
@@ -174,22 +192,28 @@ test_mempool_basic(void)
obj_data = obj;
objnum = obj;
if (*objnum > MEMPOOL_SIZE) {
- printf("bad object number\n");
+ printf("bad object number(%d)\n", *objnum);
ret = -1;
break;
}
- for (j=sizeof(*objnum); j<mp->elt_size; j++) {
+ for (j = sizeof(*objnum); j < mp->elt_size; j++) {
if (obj_data[j] != 0)
ret = -1;
}
- rte_mempool_put(mp, objtable[i]);
+ rte_mempool_generic_put(mp, &objtable[i], 1, cache, 0);
}
free(objtable);
if (ret == -1)
printf("objects were modified!\n");
+out:
+ if (use_external_cache) {
+ rte_mempool_cache_flush(cache, mp);
+ rte_mempool_cache_free(cache);
+ }
+
return ret;
}
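+
+/*
+ * Condensed sketch of the user-owned cache pattern exercised above
+ * (illustrative; error handling omitted, `mp` is any mempool):
+ *
+ *	struct rte_mempool_cache *c;
+ *	void *obj;
+ *
+ *	c = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
+ *			SOCKET_ID_ANY);
+ *	rte_mempool_generic_get(mp, &obj, 1, c, 0);
+ *	... use obj ...
+ *	rte_mempool_generic_put(mp, &obj, 1, c, 0);
+ *	rte_mempool_cache_flush(c, mp);
+ *	rte_mempool_cache_free(c);
+ */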
@@ -197,14 +221,17 @@ static int test_mempool_creation_with_exceeded_cache_size(void)
{
struct rte_mempool *mp_cov;
- mp_cov = rte_mempool_create("test_mempool_creation_with_exceeded_cache_size", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE,
- RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
- if(NULL != mp_cov) {
- return -1;
+ mp_cov = rte_mempool_create("test_mempool_cache_too_big",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_cov != NULL) {
+ rte_mempool_free(mp_cov);
+ RET_ERR();
}
return 0;
@@ -222,7 +249,7 @@ static int test_mempool_single_producer(void)
unsigned int i;
void *obj = NULL;
uint64_t start_cycles, end_cycles;
- uint64_t duration = rte_get_timer_hz() * 8;
+ uint64_t duration = rte_get_timer_hz() / 4;
start_cycles = rte_get_timer_cycles();
while (1) {
@@ -242,10 +269,10 @@ static int test_mempool_single_producer(void)
continue;
}
if (rte_mempool_from_obj(obj) != mp_spsc) {
- printf("test_mempool_single_producer there is an obj not owned by this mempool\n");
- return -1;
+ printf("obj not owned by this mempool\n");
+ RET_ERR();
}
- rte_mempool_sp_put(mp_spsc, obj);
+ rte_mempool_put(mp_spsc, obj);
rte_spinlock_lock(&scsp_spinlock);
scsp_obj_table[i] = NULL;
rte_spinlock_unlock(&scsp_spinlock);
@@ -262,7 +289,7 @@ static int test_mempool_single_consumer(void)
unsigned int i;
void * obj;
uint64_t start_cycles, end_cycles;
- uint64_t duration = rte_get_timer_hz() * 5;
+ uint64_t duration = rte_get_timer_hz() / 8;
start_cycles = rte_get_timer_cycles();
while (1) {
@@ -278,7 +305,7 @@ static int test_mempool_single_consumer(void)
rte_spinlock_unlock(&scsp_spinlock);
if (i >= MAX_KEEP)
continue;
- if (rte_mempool_sc_get(mp_spsc, &obj) < 0)
+ if (rte_mempool_get(mp_spsc, &obj) < 0)
break;
rte_spinlock_lock(&scsp_spinlock);
scsp_obj_table[i] = obj;
@@ -289,14 +316,17 @@ static int test_mempool_single_consumer(void)
}
/*
- * test function for mempool test based on singple consumer and single producer, can run on one lcore only
+ * test function for mempool test based on single consumer and single
+ * producer, can run on one lcore only
*/
-static int test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
+static int
+test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
{
return test_mempool_single_consumer();
}
-static void my_mp_init(struct rte_mempool * mp, __attribute__((unused)) void * arg)
+static void
+my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg)
{
printf("mempool name is %s\n", mp->name);
/* nothing to be implemented here*/
@@ -314,33 +344,41 @@ test_mempool_sp_sc(void)
unsigned lcore_next;
/* create a mempool with single producer/consumer ring */
- if (NULL == mp_spsc) {
+ if (mp_spsc == NULL) {
mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- my_mp_init, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
- if (NULL == mp_spsc) {
- return -1;
- }
+ MEMPOOL_ELT_SIZE, 0, 0,
+ my_mp_init, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY,
+ MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
+ MEMPOOL_F_SC_GET);
+ if (mp_spsc == NULL)
+ RET_ERR();
}
if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
printf("Cannot lookup mempool from its name\n");
- return -1;
+ rte_mempool_free(mp_spsc);
+ RET_ERR();
}
lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
- if (RTE_MAX_LCORE <= lcore_next)
- return -1;
- if (rte_eal_lcore_role(lcore_next) != ROLE_RTE)
- return -1;
+ if (lcore_next >= RTE_MAX_LCORE) {
+ rte_mempool_free(mp_spsc);
+ RET_ERR();
+ }
+ if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
+ rte_mempool_free(mp_spsc);
+ RET_ERR();
+ }
rte_spinlock_init(&scsp_spinlock);
memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
- rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL, lcore_next);
- if(test_mempool_single_producer() < 0)
+ rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,
+ lcore_next);
+ if (test_mempool_single_producer() < 0)
ret = -1;
- if(rte_eal_wait_lcore(lcore_next) < 0)
+ if (rte_eal_wait_lcore(lcore_next) < 0)
ret = -1;
+ rte_mempool_free(mp_spsc);
return ret;
}
@@ -349,7 +387,7 @@ test_mempool_sp_sc(void)
* it tests some more basic of mempool
*/
static int
-test_mempool_basic_ex(struct rte_mempool * mp)
+test_mempool_basic_ex(struct rte_mempool *mp)
{
unsigned i;
void **obj;
@@ -359,38 +397,41 @@ test_mempool_basic_ex(struct rte_mempool * mp)
if (mp == NULL)
return ret;
- obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE , sizeof(void *), 0);
+ obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE,
+ sizeof(void *), 0);
if (obj == NULL) {
printf("test_mempool_basic_ex fail to rte_malloc\n");
return ret;
}
- printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n", mp->name, rte_mempool_free_count(mp));
+ printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n",
+ mp->name, rte_mempool_in_use_count(mp));
if (rte_mempool_full(mp) != 1) {
- printf("test_mempool_basic_ex the mempool is not full but it should be\n");
+ printf("test_mempool_basic_ex the mempool should be full\n");
goto fail_mp_basic_ex;
}
for (i = 0; i < MEMPOOL_SIZE; i ++) {
- if (rte_mempool_mc_get(mp, &obj[i]) < 0) {
- printf("fail_mp_basic_ex fail to get mempool object for [%u]\n", i);
+ if (rte_mempool_get(mp, &obj[i]) < 0) {
+ printf("test_mp_basic_ex fail to get object for [%u]\n",
+ i);
goto fail_mp_basic_ex;
}
}
- if (rte_mempool_mc_get(mp, &err_obj) == 0) {
- printf("test_mempool_basic_ex get an impossible obj from mempool\n");
+ if (rte_mempool_get(mp, &err_obj) == 0) {
+ printf("test_mempool_basic_ex get an impossible obj\n");
goto fail_mp_basic_ex;
}
printf("number: %u\n", i);
if (rte_mempool_empty(mp) != 1) {
- printf("test_mempool_basic_ex the mempool is not empty but it should be\n");
+ printf("test_mempool_basic_ex the mempool should be empty\n");
goto fail_mp_basic_ex;
}
- for (i = 0; i < MEMPOOL_SIZE; i ++) {
- rte_mempool_mp_put(mp, obj[i]);
- }
+ for (i = 0; i < MEMPOOL_SIZE; i++)
+ rte_mempool_put(mp, obj[i]);
+
if (rte_mempool_full(mp) != 1) {
- printf("test_mempool_basic_ex the mempool is not full but it should be\n");
+ printf("test_mempool_basic_ex the mempool should be full\n");
goto fail_mp_basic_ex;
}
@@ -406,24 +447,30 @@ fail_mp_basic_ex:
static int
test_mempool_same_name_twice_creation(void)
{
- struct rte_mempool *mp_tc;
-
- mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (NULL == mp_tc)
- return -1;
-
- mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (NULL != mp_tc)
- return -1;
+ struct rte_mempool *mp_tc, *mp_tc2;
+
+ mp_tc = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ NULL, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_tc == NULL)
+ RET_ERR();
+
+ mp_tc2 = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ NULL, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_tc2 != NULL) {
+ rte_mempool_free(mp_tc);
+ rte_mempool_free(mp_tc2);
+ RET_ERR();
+ }
+ rte_mempool_free(mp_tc);
return 0;
}
@@ -444,7 +491,7 @@ test_mempool_xmem_misc(void)
usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
MEMPOOL_PG_SHIFT_MAX);
- if(sz != (size_t)usz) {
+ if (sz != (size_t)usz) {
printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
"returns: %#zx, while expected: %#zx;\n",
__func__, elt_num, total_size, sz, (size_t)usz);
@@ -457,68 +504,109 @@ test_mempool_xmem_misc(void)
static int
test_mempool(void)
{
+ struct rte_mempool *mp_cache = NULL;
+ struct rte_mempool *mp_nocache = NULL;
+ struct rte_mempool *mp_ext = NULL;
+ struct rte_mempool *mp_stack = NULL;
+
rte_atomic32_init(&synchro);
/* create a mempool (without cache) */
- if (mp_nocache == NULL)
- mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
- if (mp_nocache == NULL)
- return -1;
+ mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_nocache == NULL) {
+ printf("cannot allocate mp_nocache mempool\n");
+ goto err;
+ }
/* create a mempool (with cache) */
- if (mp_cache == NULL)
- mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE,
- RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
- if (mp_cache == NULL)
- return -1;
+ mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_cache == NULL) {
+ printf("cannot allocate mp_cache mempool\n");
+ goto err;
+ }
+ /* create a mempool with an external handler */
+ mp_stack = rte_mempool_create_empty("test_stack",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_stack == NULL) {
+ printf("cannot allocate mp_stack mempool\n");
+ goto err;
+ }
+ if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
+ printf("cannot set stack handler\n");
+ goto err;
+ }
+ if (rte_mempool_populate_default(mp_stack) < 0) {
+ printf("cannot populate mp_stack mempool\n");
+ goto err;
+ }
+ rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
/* retrieve the mempool from its name */
if (rte_mempool_lookup("test_nocache") != mp_nocache) {
printf("Cannot lookup mempool from its name\n");
- return -1;
+ goto err;
}
rte_mempool_list_dump(stdout);
/* basic tests without cache */
- mp = mp_nocache;
- if (test_mempool_basic() < 0)
- return -1;
+ if (test_mempool_basic(mp_nocache, 0) < 0)
+ goto err;
/* basic tests with cache */
- mp = mp_cache;
- if (test_mempool_basic() < 0)
- return -1;
+ if (test_mempool_basic(mp_cache, 0) < 0)
+ goto err;
+
+ /* basic tests with user-owned cache */
+ if (test_mempool_basic(mp_nocache, 1) < 0)
+ goto err;
/* more basic tests without cache */
if (test_mempool_basic_ex(mp_nocache) < 0)
- return -1;
+ goto err;
/* mempool operation test based on single producer and single comsumer */
if (test_mempool_sp_sc() < 0)
- return -1;
+ goto err;
if (test_mempool_creation_with_exceeded_cache_size() < 0)
- return -1;
+ goto err;
if (test_mempool_same_name_twice_creation() < 0)
- return -1;
+ goto err;
if (test_mempool_xmem_misc() < 0)
- return -1;
+ goto err;
+
+ /* test the stack handler */
+ if (test_mempool_basic(mp_stack, 1) < 0)
+ goto err;
rte_mempool_list_dump(stdout);
return 0;
+
+err:
+ rte_mempool_free(mp_nocache);
+ rte_mempool_free(mp_cache);
+ rte_mempool_free(mp_stack);
+ rte_mempool_free(mp_ext);
+ return -1;
}
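+
+/*
+ * The external-ops recipe used for mp_stack above generalizes to any
+ * registered handler (sketch; `n`, `elt_size` and `cache_size` are
+ * placeholders):
+ *
+ *	mp = rte_mempool_create_empty("name", n, elt_size,
+ *			cache_size, 0, SOCKET_ID_ANY, 0);
+ *	rte_mempool_set_ops_byname(mp, "stack", NULL);
+ *	rte_mempool_populate_default(mp);
+ *
+ * The ops must be selected before the pool is populated, because
+ * population already allocates through the chosen handler.
+ */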
static struct test_command mempool_cmd = {
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index cdc02a00..4fac04cd 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -78,6 +78,9 @@
* - One core without cache
* - Two cores without cache
* - Max. cores without cache
+ * - One core with user-owned cache
+ * - Two cores with user-owned cache
+ * - Max. cores with user-owned cache
*
* - Bulk size (*n_get_bulk*, *n_put_bulk*)
*
@@ -96,8 +99,21 @@
#define MAX_KEEP 128
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
+#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
+#define RET_ERR() do { \
+ LOG_ERR(); \
+ return -1; \
+ } while (0)
+#define GOTO_ERR(var, label) do { \
+ LOG_ERR(); \
+ var = -1; \
+ goto label; \
+ } while (0)
+
static struct rte_mempool *mp;
static struct rte_mempool *mp_cache, *mp_nocache;
+static int use_external_cache;
+static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
static rte_atomic32_t synchro;
@@ -110,7 +126,7 @@ static unsigned n_keep;
/* number of enqueues / dequeues */
struct mempool_test_stats {
- unsigned enq_count;
+ uint64_t enq_count;
} __rte_cache_aligned;
static struct mempool_test_stats stats[RTE_MAX_LCORE];
@@ -134,15 +150,27 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
void *obj_table[MAX_KEEP];
unsigned i, idx;
unsigned lcore_id = rte_lcore_id();
- int ret;
+ int ret = 0;
uint64_t start_cycles, end_cycles;
uint64_t time_diff = 0, hz = rte_get_timer_hz();
+ struct rte_mempool_cache *cache;
+
+ if (use_external_cache) {
+ /* Create a user-owned mempool cache. */
+ cache = rte_mempool_cache_create(external_cache_size,
+ SOCKET_ID_ANY);
+ if (cache == NULL)
+ RET_ERR();
+ } else {
+ /* May be NULL if cache is disabled. */
+ cache = rte_mempool_default_cache(mp, lcore_id);
+ }
/* n_get_bulk and n_put_bulk must be divisors of n_keep */
if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)
- return -1;
+ GOTO_ERR(ret, out);
if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
- return -1;
+ GOTO_ERR(ret, out);
stats[lcore_id].enq_count = 0;
@@ -157,13 +185,14 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
/* get n_keep objects by bulk of n_bulk */
idx = 0;
while (idx < n_keep) {
- ret = rte_mempool_get_bulk(mp, &obj_table[idx],
- n_get_bulk);
+ ret = rte_mempool_generic_get(mp,
+ &obj_table[idx],
+ n_get_bulk,
+ cache, 0);
if (unlikely(ret < 0)) {
rte_mempool_dump(stdout, mp);
- rte_ring_dump(stdout, mp->ring);
/* in this case, objects are lost... */
- return -1;
+ GOTO_ERR(ret, out);
}
idx += n_get_bulk;
}
@@ -171,8 +200,9 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
/* put the objects back */
idx = 0;
while (idx < n_keep) {
- rte_mempool_put_bulk(mp, &obj_table[idx],
- n_put_bulk);
+ rte_mempool_generic_put(mp, &obj_table[idx],
+ n_put_bulk,
+ cache, 0);
idx += n_put_bulk;
}
}
@@ -181,7 +211,13 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)
stats[lcore_id].enq_count += N;
}
- return 0;
+out:
+ if (use_external_cache) {
+ rte_mempool_cache_flush(cache, mp);
+ rte_mempool_cache_free(cache);
+ }
+
+ return ret;
}
/* launch all the per-lcore test, and display the result */
@@ -189,7 +225,7 @@ static int
launch_cores(unsigned cores)
{
unsigned lcore_id;
- unsigned rate;
+ uint64_t rate;
int ret;
unsigned cores_save = cores;
@@ -200,9 +236,11 @@ launch_cores(unsigned cores)
printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u "
"n_put_bulk=%u n_keep=%u ",
- (unsigned) mp->cache_size, cores, n_get_bulk, n_put_bulk, n_keep);
+ use_external_cache ?
+ external_cache_size : (unsigned) mp->cache_size,
+ cores, n_get_bulk, n_put_bulk, n_keep);
- if (rte_mempool_count(mp) != MEMPOOL_SIZE) {
+ if (rte_mempool_avail_count(mp) != MEMPOOL_SIZE) {
printf("mempool is not full\n");
return -1;
}
@@ -238,7 +276,7 @@ launch_cores(unsigned cores)
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
rate += (stats[lcore_id].enq_count / TIME_S);
- printf("rate_persec=%u\n", rate);
+ printf("rate_persec=%" PRIu64 "\n", rate);
return 0;
}
@@ -324,6 +362,20 @@ test_mempool_perf(void)
if (do_one_mempool_test(rte_lcore_count()) < 0)
return -1;
+ /* performance test with 1, 2 and max cores */
+ printf("start performance test (with user-owned cache)\n");
+ mp = mp_nocache;
+ use_external_cache = 1;
+
+ if (do_one_mempool_test(1) < 0)
+ return -1;
+
+ if (do_one_mempool_test(2) < 0)
+ return -1;
+
+ if (do_one_mempool_test(rte_lcore_count()) < 0)
+ return -1;
+
rte_mempool_list_dump(stdout);
return 0;
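The per-lcore loop above is the first in-tree user of the user-owned cache API. A condensed sketch of the cache lifecycle it relies on, assuming mp points to an already-populated mempool (the bulk size of 32 is illustrative):

#include <rte_mempool.h>

/* Sketch: get/put through a caller-managed cache instead of the
 * per-lcore default cache. */
static int
use_private_cache(struct rte_mempool *mp)
{
	void *objs[32];
	struct rte_mempool_cache *cache;
	int ret;

	cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
					 SOCKET_ID_ANY);
	if (cache == NULL)
		return -1;

	/* Dequeue and enqueue explicitly through the private cache. */
	ret = rte_mempool_generic_get(mp, objs, 32, cache, 0);
	if (ret == 0)
		rte_mempool_generic_put(mp, objs, 32, cache, 0);

	/* Return any cached objects to the pool before freeing. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);

	return ret;
}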
diff --git a/app/test/test_mp_secondary.c b/app/test/test_mp_secondary.c
index 4dfe418e..f66b68f2 100644
--- a/app/test/test_mp_secondary.c
+++ b/app/test/test_mp_secondary.c
@@ -245,6 +245,7 @@ run_object_creation_tests(void)
printf("# Checked rte_lpm_create() OK\n");
#endif
+#ifdef RTE_APP_TEST_RESOURCE_TAR
/* Run a test_pci call */
if (test_pci() != 0) {
printf("PCI scan failed in secondary\n");
@@ -252,6 +253,7 @@ run_object_creation_tests(void)
return -1;
} else
printf("PCI scan succeeded in secondary\n");
+#endif
return 0;
}
@@ -266,9 +268,11 @@ test_mp_secondary(void)
{
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
if (!test_pci_run) {
+#ifdef RTE_APP_TEST_RESOURCE_TAR
printf("=== Running pre-requisite test of test_pci\n");
test_pci();
printf("=== Requisite test done\n");
+#endif
}
return run_secondary_instances();
}
diff --git a/app/test/test_pci.c b/app/test/test_pci.c
index 0ed357e6..354a0ad9 100644
--- a/app/test/test_pci.c
+++ b/app/test/test_pci.c
@@ -43,6 +43,7 @@
#include <rte_devargs.h>
#include "test.h"
+#include "resource.h"
/* Generic maximum number of drivers to have room to allocate all drivers */
#define NUM_MAX_DRIVERS 256
@@ -66,21 +67,14 @@ static int my_driver_init(struct rte_pci_driver *dr,
/* IXGBE NICS */
struct rte_pci_id my_driver_id[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include <rte_pci_dev_ids.h>
-
-{ .vendor_id = 0, /* sentinel */ },
+ {RTE_PCI_DEVICE(0x0001, 0x1234)},
+ { .vendor_id = 0, /* sentinel */ },
};
struct rte_pci_id my_driver_id2[] = {
-
-/* IGB & EM NICS */
-#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include <rte_pci_dev_ids.h>
-
-{ .vendor_id = 0, /* sentinel */ },
+ {RTE_PCI_DEVICE(0x0001, 0x4444)},
+ {RTE_PCI_DEVICE(0x0002, 0xabcd)},
+ { .vendor_id = 0, /* sentinel */ },
};
struct rte_pci_driver my_driver = {
@@ -144,23 +138,102 @@ static void free_devargs_list(void)
}
}
-int
-test_pci(void)
+/* backup real devices & drivers (not used for testing) */
+struct pci_driver_list real_pci_driver_list =
+ TAILQ_HEAD_INITIALIZER(real_pci_driver_list);
+struct pci_device_list real_pci_device_list =
+ TAILQ_HEAD_INITIALIZER(real_pci_device_list);
+
+REGISTER_LINKED_RESOURCE(test_pci_sysfs);
+
+static int
+test_pci_setup(void)
{
- struct rte_devargs_list save_devargs_list;
- struct rte_pci_driver *dr = NULL;
- struct rte_pci_driver *save_pci_driver_list[NUM_MAX_DRIVERS];
- unsigned i, num_drivers = 0;
+ struct rte_pci_device *dev;
+ struct rte_pci_driver *dr;
+ const struct resource *r;
+ int ret;
- printf("Dump all devices\n");
- rte_eal_pci_dump(stdout);
+ r = resource_find("test_pci_sysfs");
+ TEST_ASSERT_NOT_NULL(r, "missing resource test_pci_sysfs");
+
+ ret = resource_untar(r);
+ TEST_ASSERT_SUCCESS(ret, "failed to untar %s", r->name);
+
+ ret = setenv("SYSFS_PCI_DEVICES", "test_pci_sysfs/bus/pci/devices", 1);
+ TEST_ASSERT_SUCCESS(ret, "failed to setenv");
- /* Unregister all previous drivers */
- TAILQ_FOREACH(dr, &pci_driver_list, next) {
+ /* Unregister original devices & drivers lists */
+ while (!TAILQ_EMPTY(&pci_driver_list)) {
+ dr = TAILQ_FIRST(&pci_driver_list);
rte_eal_pci_unregister(dr);
- save_pci_driver_list[num_drivers++] = dr;
+ TAILQ_INSERT_TAIL(&real_pci_driver_list, dr, next);
}
+ while (!TAILQ_EMPTY(&pci_device_list)) {
+ dev = TAILQ_FIRST(&pci_device_list);
+ TAILQ_REMOVE(&pci_device_list, dev, next);
+ TAILQ_INSERT_TAIL(&real_pci_device_list, dev, next);
+ }
+
+ ret = rte_eal_pci_scan();
+ TEST_ASSERT_SUCCESS(ret, "failed to scan PCI bus");
+ rte_eal_pci_dump(stdout);
+
+ return 0;
+}
+
+static int
+test_pci_cleanup(void)
+{
+ struct rte_pci_device *dev;
+ struct rte_pci_driver *dr;
+ const struct resource *r;
+ int ret;
+
+ unsetenv("SYSFS_PCI_DEVICES");
+
+ r = resource_find("test_pci_sysfs");
+ TEST_ASSERT_NOT_NULL(r, "missing resource test_pci_sysfs");
+
+ ret = resource_rm_by_tar(r);
+ TEST_ASSERT_SUCCESS(ret, "Failed to delete resource %s", r->name);
+
+ /*
+ * FIXME: there is no API in DPDK to free a rte_pci_device so we
+ * cannot free the devices in the right way. Let's assume that this
+ * is acceptable for the tests.
+ */
+ while (!TAILQ_EMPTY(&pci_device_list)) {
+ dev = TAILQ_FIRST(&pci_device_list);
+ TAILQ_REMOVE(&pci_device_list, dev, next);
+ }
+
+ /* Restore original devices & drivers lists */
+ while (!TAILQ_EMPTY(&real_pci_driver_list)) {
+ dr = TAILQ_FIRST(&real_pci_driver_list);
+ TAILQ_REMOVE(&real_pci_driver_list, dr, next);
+ rte_eal_pci_register(dr);
+ }
+
+ while (!TAILQ_EMPTY(&real_pci_device_list)) {
+ dev = TAILQ_FIRST(&real_pci_device_list);
+ TAILQ_REMOVE(&real_pci_device_list, dev, next);
+ TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+ }
+
+ return 0;
+}
+
+static int
+test_pci_blacklist(void)
+{
+ struct rte_devargs_list save_devargs_list;
+
+ printf("Dump all devices\n");
+ TEST_ASSERT(TAILQ_EMPTY(&pci_driver_list),
+ "pci_driver_list not empty");
+
rte_eal_pci_register(&my_driver);
rte_eal_pci_register(&my_driver2);
@@ -196,9 +269,48 @@ test_pci(void)
rte_eal_pci_unregister(&my_driver);
rte_eal_pci_unregister(&my_driver2);
- /* Restore original driver list */
- for (i = 0; i < num_drivers; i++)
- rte_eal_pci_register(save_pci_driver_list[i]);
+ return 0;
+}
+
+static int test_pci_sysfs(void)
+{
+ const char *orig;
+ const char *newpath;
+ int ret;
+
+ orig = pci_get_sysfs_path();
+ ret = setenv("SYSFS_PCI_DEVICES", "My Documents", 1);
+ TEST_ASSERT_SUCCESS(ret, "Failed setenv to My Documents");
+
+ newpath = pci_get_sysfs_path();
+ TEST_ASSERT(!strcmp(newpath, "My Documents"),
+ "pci_get_sysfs_path() should return 'My Documents' "
+ "but gives %s", newpath);
+
+ ret = setenv("SYSFS_PCI_DEVICES", orig, 1);
+ TEST_ASSERT_SUCCESS(ret, "Failed setenv back to '%s'", orig);
+
+ newpath = pci_get_sysfs_path();
+ TEST_ASSERT(!strcmp(orig, newpath),
+ "pci_get_sysfs_path returned unexpected path: "
+ "%s (expected: %s)", newpath, orig);
+ return 0;
+}
+
+int
+test_pci(void)
+{
+ if (test_pci_sysfs())
+ return -1;
+
+ if (test_pci_setup())
+ return -1;
+
+ if (test_pci_blacklist())
+ return -1;
+
+ if (test_pci_cleanup())
+ return -1;
return 0;
}
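The setup/cleanup pair above is what makes the PCI autotest hermetic: the scan is redirected to an embedded sysfs snapshot instead of the host's /sys. A stripped-down sketch of just the redirection, with an illustrative directory name (the real test untars the tree from the linked resource first):

#include <stdio.h>
#include <stdlib.h>
#include <rte_pci.h>

/* Sketch: point the EAL PCI scan at a fake sysfs tree, then restore. */
static int
scan_fake_sysfs(void)
{
	int ret;

	/* The EAL resolves its scan root via pci_get_sysfs_path(),
	 * which honours this environment variable. */
	if (setenv("SYSFS_PCI_DEVICES", "fake_sysfs/bus/pci/devices", 1) != 0)
		return -1;

	ret = rte_eal_pci_scan();
	if (ret == 0)
		rte_eal_pci_dump(stdout);

	unsetenv("SYSFS_PCI_DEVICES");
	return ret;
}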
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/class b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/class
new file mode 100644
index 00000000..2f9c1dad
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/class
@@ -0,0 +1 @@
+0x020000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/config b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/config
new file mode 100644
index 00000000..7752421c
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/config
Binary files differ
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/consistent_dma_mask_bits b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/consistent_dma_mask_bits
new file mode 100644
index 00000000..900731ff
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/consistent_dma_mask_bits
@@ -0,0 +1 @@
+64
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/device b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/device
new file mode 100644
index 00000000..48a62909
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/device
@@ -0,0 +1 @@
+0x1234
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/dma_mask_bits b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/dma_mask_bits
new file mode 100644
index 00000000..900731ff
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/dma_mask_bits
@@ -0,0 +1 @@
+64
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/enable b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/enable
new file mode 100644
index 00000000..d00491fd
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/enable
@@ -0,0 +1 @@
+1
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/irq b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/irq
new file mode 100644
index 00000000..573541ac
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/irq
@@ -0,0 +1 @@
+0
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/modalias b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/modalias
new file mode 100644
index 00000000..f4c76ed9
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/modalias
@@ -0,0 +1 @@
+pci:v00008086d000010FBsv00008086sd00000003bc02sc00i00
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/msi_bus b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/msi_bus
new file mode 100644
index 00000000..d00491fd
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/msi_bus
@@ -0,0 +1 @@
+1
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/numa_node b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/numa_node
new file mode 100644
index 00000000..3a2e3f49
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/numa_node
@@ -0,0 +1 @@
+-1
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/resource b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/resource
new file mode 100644
index 00000000..f3889296
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/resource
@@ -0,0 +1,13 @@
+0x00000000d0080000 0x00000000d00fffff 0x000000000014220c
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x000000000000e020 0x000000000000e03f 0x0000000000040101
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000d0104000 0x00000000d0107fff 0x000000000014220c
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000ab000000 0x00000000ab0fffff 0x0000000000140204
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000ab100000 0x00000000ab1fffff 0x0000000000140204
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_numvfs b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_numvfs
new file mode 100644
index 00000000..573541ac
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_numvfs
@@ -0,0 +1 @@
+0
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_totalvfs b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_totalvfs
new file mode 100644
index 00000000..4b9026d8
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_totalvfs
@@ -0,0 +1 @@
+63
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_device b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_device
new file mode 100644
index 00000000..89a932cc
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_device
@@ -0,0 +1 @@
+0x0003
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_vendor
new file mode 100644
index 00000000..446afb4b
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_vendor
@@ -0,0 +1 @@
+0x0001
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/uevent b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/uevent
new file mode 100644
index 00000000..1dbe34de
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/uevent
@@ -0,0 +1,6 @@
+DRIVER=ixgbe
+PCI_CLASS=20000
+PCI_ID=8086:10FB
+PCI_SUBSYS_ID=8086:0003
+PCI_SLOT_NAME=0000:01:00.0
+MODALIAS=pci:v00008086d000010FBsv00008086sd00000003bc02sc00i00
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/vendor
new file mode 100644
index 00000000..446afb4b
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/vendor
@@ -0,0 +1 @@
+0x0001
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/class b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/class
new file mode 100644
index 00000000..22dd9361
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/class
@@ -0,0 +1 @@
+0x100000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/device b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/device
new file mode 100644
index 00000000..f61bbe63
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/device
@@ -0,0 +1 @@
+0xabcd
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/resource b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/resource
new file mode 100644
index 00000000..f3889296
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/resource
@@ -0,0 +1,13 @@
+0x00000000d0080000 0x00000000d00fffff 0x000000000014220c
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x000000000000e020 0x000000000000e03f 0x0000000000040101
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000d0104000 0x00000000d0107fff 0x000000000014220c
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000ab000000 0x00000000ab0fffff 0x0000000000140204
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000ab100000 0x00000000ab1fffff 0x0000000000140204
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_device b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_device
new file mode 100644
index 00000000..f61bbe63
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_device
@@ -0,0 +1 @@
+0xabcd
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_vendor
new file mode 100644
index 00000000..4321b81f
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_vendor
@@ -0,0 +1 @@
+0x0002
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/vendor
new file mode 100644
index 00000000..4321b81f
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/vendor
@@ -0,0 +1 @@
+0x0002
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/class b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/class
new file mode 100644
index 00000000..22dd9361
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/class
@@ -0,0 +1 @@
+0x100000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/device b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/device
new file mode 100644
index 00000000..ccaa4982
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/device
@@ -0,0 +1 @@
+0x4444
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/resource b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/resource
new file mode 100644
index 00000000..f3889296
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/resource
@@ -0,0 +1,13 @@
+0x00000000d0080000 0x00000000d00fffff 0x000000000014220c
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x000000000000e020 0x000000000000e03f 0x0000000000040101
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000d0104000 0x00000000d0107fff 0x000000000014220c
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000ab000000 0x00000000ab0fffff 0x0000000000140204
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x00000000ab100000 0x00000000ab1fffff 0x0000000000140204
+0x0000000000000000 0x0000000000000000 0x0000000000000000
+0x0000000000000000 0x0000000000000000 0x0000000000000000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_device b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_device
new file mode 100644
index 00000000..ccaa4982
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_device
@@ -0,0 +1 @@
+0x4444
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_vendor
new file mode 100644
index 00000000..446afb4b
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_vendor
@@ -0,0 +1 @@
+0x0001
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/vendor
new file mode 100644
index 00000000..446afb4b
--- /dev/null
+++ b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/vendor
@@ -0,0 +1 @@
+0x0001
diff --git a/app/test/test_per_lcore.c b/app/test/test_per_lcore.c
index b16449a9..f452cdb0 100644
--- a/app/test/test_per_lcore.c
+++ b/app/test/test_per_lcore.c
@@ -92,8 +92,8 @@ display_vars(__attribute__((unused)) void *arg)
static int
test_per_lcore_delay(__attribute__((unused)) void *arg)
{
- rte_delay_ms(5000);
- printf("wait 5000ms on lcore %u\n", rte_lcore_id());
+ rte_delay_ms(100);
+ printf("wait 100ms on lcore %u\n", rte_lcore_id());
return 0;
}
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 59803f7c..3d56cd29 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -709,9 +709,6 @@ test_pmd_perf(void)
return -1;
}
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
nb_lcores = rte_lcore_count();
memset(lcore_conf, 0, sizeof(lcore_conf));
diff --git a/app/test/test_red.c b/app/test/test_red.c
index 81c9d679..2384c556 100644
--- a/app/test/test_red.c
+++ b/app/test/test_red.c
@@ -57,6 +57,7 @@
#define MSEC_PER_SEC 1000 /**< Milli-seconds per second */
#define USEC_PER_MSEC 1000 /**< Micro-seconds per milli-second */
#define USEC_PER_SEC 1000000 /**< Micro-seconds per second */
+#define NSEC_PER_SEC (USEC_PER_SEC * 1000) /**< Nano-seconds per second */
/**< structures for testing rte_red performance and function */
struct test_rte_red_config { /**< Test structure for RTE_RED config */
@@ -280,12 +281,15 @@ static uint64_t get_machclk_freq(void)
uint64_t start = 0;
uint64_t end = 0;
uint64_t diff = 0;
- uint64_t clk_freq_hz = 0;
+ static uint64_t clk_freq_hz;
struct timespec tv_start = {0, 0}, tv_end = {0, 0};
struct timespec req = {0, 0};
- req.tv_sec = 1;
- req.tv_nsec = 0;
+ if (clk_freq_hz != 0)
+ return clk_freq_hz;
+
+ req.tv_sec = 0;
+ req.tv_nsec = NSEC_PER_SEC / 4;
clock_gettime(CLOCK_REALTIME, &tv_start);
start = rte_rdtsc();
@@ -435,8 +439,8 @@ static struct test_queue ft_tqueue = {
};
static struct test_var ft_tvar = {
- .wait_usec = 250000,
- .num_iterations = 20,
+ .wait_usec = 10000,
+ .num_iterations = 5,
.num_ops = 10000,
.clk_freq = 0,
.dropped = ft_dropped,
@@ -1747,6 +1751,16 @@ struct tests func_tests[] = {
{ &ovfl_test1_config, ovfl_test1 },
};
+struct tests func_tests_quick[] = {
+ { &func_test1_config, func_test1 },
+ { &func_test2_config, func_test2 },
+ { &func_test3_config, func_test3 },
+ /* no test 4 as it takes a lot of time */
+ { &func_test5_config, func_test5 },
+ { &func_test6_config, func_test6 },
+ { &ovfl_test1_config, ovfl_test1 },
+};
+
struct tests perf_tests[] = {
{ &perf1_test1_config, perf1_test },
{ &perf1_test2_config, perf1_test },
@@ -1850,27 +1864,60 @@ test_invalid_parameters(void)
return 0;
}
+static void
+show_stats(const uint32_t num_tests, const uint32_t num_pass)
+{
+ if (num_pass == num_tests)
+ printf("[total: %u, pass: %u]\n", num_tests, num_pass);
+ else
+ printf("[total: %u, pass: %u, fail: %u]\n", num_tests, num_pass,
+ num_tests - num_pass);
+}
+
+static int
+tell_the_result(const uint32_t num_tests, const uint32_t num_pass)
+{
+ return (num_pass == num_tests) ? 0 : 1;
+}
+
static int
test_red(void)
{
uint32_t num_tests = 0;
uint32_t num_pass = 0;
- int ret = 0;
if (test_invalid_parameters() < 0)
return -1;
+ run_tests(func_tests_quick, RTE_DIM(func_tests_quick),
+ &num_tests, &num_pass);
+ show_stats(num_tests, num_pass);
+ return tell_the_result(num_tests, num_pass);
+}
+
+static int
+test_red_perf(void)
+{
+ uint32_t num_tests = 0;
+ uint32_t num_pass = 0;
- run_tests(func_tests, RTE_DIM(func_tests), &num_tests, &num_pass);
run_tests(perf_tests, RTE_DIM(perf_tests), &num_tests, &num_pass);
+ show_stats(num_tests, num_pass);
+ return tell_the_result(num_tests, num_pass);
+}
- if (num_pass == num_tests) {
- printf("[total: %u, pass: %u]\n", num_tests, num_pass);
- ret = 0;
- } else {
- printf("[total: %u, pass: %u, fail: %u]\n", num_tests, num_pass, num_tests - num_pass);
- ret = -1;
- }
- return ret;
+static int
+test_red_all(void)
+{
+ uint32_t num_tests = 0;
+ uint32_t num_pass = 0;
+
+ if (test_invalid_parameters() < 0)
+ return -1;
+
+ run_tests(func_tests, RTE_DIM(func_tests), &num_tests, &num_pass);
+ run_tests(perf_tests, RTE_DIM(perf_tests), &num_tests, &num_pass);
+ show_stats(num_tests, num_pass);
+ return tell_the_result(num_tests, num_pass);
}
static struct test_command red_cmd = {
@@ -1878,3 +1925,15 @@ static struct test_command red_cmd = {
.callback = test_red,
};
REGISTER_TEST_COMMAND(red_cmd);
+
+static struct test_command red_cmd_perf = {
+ .command = "red_perf",
+ .callback = test_red_perf,
+};
+REGISTER_TEST_COMMAND(red_cmd_perf);
+
+static struct test_command red_cmd_all = {
+ .command = "red_all",
+ .callback = test_red_all,
+};
+REGISTER_TEST_COMMAND(red_cmd_all);
diff --git a/app/test/test_resource.c b/app/test/test_resource.c
new file mode 100644
index 00000000..39a64689
--- /dev/null
+++ b/app/test/test_resource.c
@@ -0,0 +1,137 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of RehiveTech nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "test.h"
+#include "resource.h"
+
+const char test_resource_dpdk_blob[] = {
+ '\x44', '\x50', '\x44', '\x4b', '\x00'
+};
+
+REGISTER_RESOURCE(test_resource_dpdk,
+ test_resource_dpdk_blob, test_resource_dpdk_blob + 4);
+
+static int test_resource_dpdk(void)
+{
+ const struct resource *r;
+
+ r = resource_find("test_resource_dpdk");
+ TEST_ASSERT_NOT_NULL(r, "Could not find test_resource_dpdk");
+ TEST_ASSERT(!strcmp(r->name, "test_resource_dpdk"),
+ "Found resource %s, expected test_resource_dpdk",
+ r->name);
+
+ TEST_ASSERT(!strncmp("DPDK", r->begin, 4),
+ "Unexpected payload: %.4s...", r->begin);
+
+ return 0;
+}
+
+REGISTER_LINKED_RESOURCE(test_resource_c);
+
+static int test_resource_c(void)
+{
+ const struct resource *r;
+ FILE *f;
+
+ r = resource_find("test_resource_c");
+ TEST_ASSERT_NOT_NULL(r, "No test_resource_c found");
+ TEST_ASSERT(!strcmp(r->name, "test_resource_c"),
+ "Found resource %s, expected test_resource_c",
+ r->name);
+
+ TEST_ASSERT_SUCCESS(resource_fwrite_file(r, "test_resource.c"),
+ "Failed to to write file %s", r->name);
+
+ f = fopen("test_resource.c", "r");
+ TEST_ASSERT_NOT_NULL(f,
+ "Missing extracted file resource.c");
+ fclose(f);
+ remove("test_resource.c");
+
+ return 0;
+}
+
+#ifdef RTE_APP_TEST_RESOURCE_TAR
+REGISTER_LINKED_RESOURCE(test_resource_tar);
+
+static int test_resource_tar(void)
+{
+ const struct resource *r;
+ FILE *f;
+
+ r = resource_find("test_resource_tar");
+ TEST_ASSERT_NOT_NULL(r, "No test_resource_tar found");
+ TEST_ASSERT(!strcmp(r->name, "test_resource_tar"),
+ "Found resource %s, expected test_resource_tar",
+ r->name);
+
+ TEST_ASSERT_SUCCESS(resource_untar(r),
+ "Failed to to untar %s", r->name);
+
+ f = fopen("test_resource.c", "r");
+ TEST_ASSERT_NOT_NULL(f,
+ "Missing extracted file test_resource.c");
+ fclose(f);
+
+ TEST_ASSERT_SUCCESS(resource_rm_by_tar(r),
+ "Failed to remove extracted contents of %s", r->name);
+ return 0;
+}
+
+#endif /* RTE_APP_TEST_RESOURCE_TAR */
+
+static int test_resource(void)
+{
+ if (test_resource_dpdk())
+ return -1;
+
+ if (test_resource_c())
+ return -1;
+
+#ifdef RTE_APP_TEST_RESOURCE_TAR
+ if (test_resource_tar())
+ return -1;
+#endif /* RTE_APP_TEST_RESOURCE_TAR */
+
+ return 0;
+}
+
+static struct test_command resource_cmd = {
+ .command = "resource_autotest",
+ .callback = test_resource,
+};
+REGISTER_TEST_COMMAND(resource_cmd);
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index 0d7523ed..9095e597 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -100,8 +100,6 @@
#define RING_SIZE 4096
#define MAX_BULK 32
-#define N 65536
-#define TIME_S 5
static rte_atomic32_t synchro;
@@ -130,7 +128,7 @@ check_live_watermark_change(__attribute__((unused)) void *dummy)
/* init the object table */
memset(obj_table, 0, sizeof(obj_table));
- end_time = rte_get_timer_cycles() + (hz * 2);
+ end_time = rte_get_timer_cycles() + (hz / 4);
/* check that bulk and watermark are 4 and 32 (respectively) */
while (diff >= 0) {
@@ -194,9 +192,9 @@ test_live_watermark_change(void)
* watermark and quota */
rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
- rte_delay_ms(1000);
+ rte_delay_ms(100);
rte_ring_set_water_mark(r, 32);
- rte_delay_ms(1000);
+ rte_delay_ms(100);
if (rte_eal_wait_lcore(lcore_id2) < 0)
return -1;
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 16ced7fb..180d6deb 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -129,7 +129,7 @@ test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
static uint64_t lock_count[RTE_MAX_LCORE] = {0};
-#define TIME_S 5
+#define TIME_MS 100
static int
load_loop_fn(void *func_param)
@@ -145,7 +145,7 @@ load_loop_fn(void *func_param)
while (rte_atomic32_read(&synchro) == 0);
begin = rte_get_timer_cycles();
- while (time_diff / hz < TIME_S) {
+ while (time_diff < hz * TIME_MS / 1000) {
if (use_lock)
rte_spinlock_lock(&lk);
lcount++;
@@ -258,7 +258,7 @@ test_spinlock(void)
RTE_LCORE_FOREACH_SLAVE(i) {
rte_spinlock_unlock(&sl_tab[i]);
- rte_delay_ms(100);
+ rte_delay_ms(10);
}
rte_eal_mp_wait_lcore();
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index 944e2adc..bc07925e 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -137,7 +137,7 @@
#include <rte_random.h>
#include <rte_malloc.h>
-#define TEST_DURATION_S 20 /* in seconds */
+#define TEST_DURATION_S 1 /* in seconds */
#define NB_TIMER 4
#define RTE_LOGTYPE_TESTTIMER RTE_LOGTYPE_USER3
@@ -305,7 +305,7 @@ timer_stress2_main_loop(__attribute__((unused)) void *arg)
{
static struct rte_timer *timers;
int i, ret;
- uint64_t delay = rte_get_timer_hz() / 4;
+ uint64_t delay = rte_get_timer_hz() / 20;
unsigned lcore_id = rte_lcore_id();
unsigned master = rte_get_master_lcore();
int32_t my_collisions = 0;
@@ -346,7 +346,7 @@ timer_stress2_main_loop(__attribute__((unused)) void *arg)
rte_atomic32_add(&collisions, my_collisions);
/* wait long enough for timers to expire */
- rte_delay_ms(500);
+ rte_delay_ms(100);
/* all cores rendezvous */
if (lcore_id == master) {
@@ -396,7 +396,7 @@ timer_stress2_main_loop(__attribute__((unused)) void *arg)
}
/* wait long enough for timers to expire */
- rte_delay_ms(500);
+ rte_delay_ms(100);
/* now check that we get the right number of callbacks */
if (lcore_id == master) {
@@ -495,13 +495,13 @@ timer_basic_main_loop(__attribute__((unused)) void *arg)
/* launch all timers on core 0 */
if (lcore_id == rte_get_master_lcore()) {
- mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
+ mytimer_reset(&mytiminfo[0], hz/4, SINGLE, lcore_id,
timer_basic_cb);
- mytimer_reset(&mytiminfo[1], hz*2, SINGLE, lcore_id,
+ mytimer_reset(&mytiminfo[1], hz/2, SINGLE, lcore_id,
timer_basic_cb);
- mytimer_reset(&mytiminfo[2], hz, PERIODICAL, lcore_id,
+ mytimer_reset(&mytiminfo[2], hz/4, PERIODICAL, lcore_id,
timer_basic_cb);
- mytimer_reset(&mytiminfo[3], hz, PERIODICAL,
+ mytimer_reset(&mytiminfo[3], hz/4, PERIODICAL,
rte_get_next_lcore(lcore_id, 0, 1),
timer_basic_cb);
}
@@ -591,7 +591,7 @@ test_timer(void)
end_time = cur_time + (hz * TEST_DURATION_S);
/* start other cores */
- printf("Start timer stress tests (%d seconds)\n", TEST_DURATION_S);
+ printf("Start timer stress tests\n");
rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MASTER);
rte_eal_mp_wait_lcore();
@@ -612,7 +612,7 @@ test_timer(void)
end_time = cur_time + (hz * TEST_DURATION_S);
/* start other cores */
- printf("\nStart timer basic tests (%d seconds)\n", TEST_DURATION_S);
+ printf("\nStart timer basic tests\n");
rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MASTER);
rte_eal_mp_wait_lcore();
diff --git a/config/common_base b/config/common_base
index 0124e86c..cfee8257 100644
--- a/config/common_base
+++ b/config/common_base
@@ -88,7 +88,7 @@ CONFIG_RTE_MAX_NUMA_NODES=8
CONFIG_RTE_MAX_MEMSEG=256
CONFIG_RTE_MAX_MEMZONE=2560
CONFIG_RTE_MAX_TAILQ=32
-CONFIG_RTE_LOG_LEVEL=8
+CONFIG_RTE_LOG_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
CONFIG_RTE_LIBEAL_USE_HPET=n
CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
@@ -101,14 +101,6 @@ CONFIG_RTE_MALLOC_DEBUG=n
CONFIG_RTE_EAL_PMD_PATH=""
#
-# Special configurations in PCI Config Space for high performance
-# They are all deprecated, and will be removed later.
-#
-CONFIG_RTE_PCI_CONFIG=n
-CONFIG_RTE_PCI_EXTENDED_TAG=""
-CONFIG_RTE_PCI_MAX_READ_REQUEST_SIZE=0
-
-#
# Compile Environment Abstraction Layer to support Vmware TSC map
#
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=y
@@ -215,8 +207,6 @@ CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS=1
#
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
-CONFIG_RTE_LIBRTE_MLX5_SGE_WR_N=4
-CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE=0
CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8
#
@@ -253,6 +243,11 @@ CONFIG_RTE_LIBRTE_NFP_PMD=n
CONFIG_RTE_LIBRTE_NFP_DEBUG=n
#
+# Compile burst-oriented Broadcom BNXT PMD driver
+#
+CONFIG_RTE_LIBRTE_BNXT_PMD=y
+
+#
# Compile software PMD backed by SZEDATA2 device
#
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
@@ -267,6 +262,16 @@ CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS=0
#
+# Compile burst-oriented Cavium Thunderx NICVF PMD driver
+#
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n
+
+#
# Compile burst-oriented VIRTIO PMD driver
#
CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
@@ -305,6 +310,18 @@ CONFIG_RTE_LIBRTE_PMD_BOND=y
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB_L1=n
+# QLogic 25G/40G/100G PMD
+#
+CONFIG_RTE_LIBRTE_QEDE_PMD=n
+CONFIG_RTE_LIBRTE_QEDE_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=n
+CONFIG_RTE_LIBRTE_QEDE_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n
+# Provides the absolute path/name of the firmware file.
+# An empty string denotes that the driver will use the default firmware.
+CONFIG_RTE_LIBRTE_QEDE_FW=""
+
#
# Compile software PMD backed by AF_PACKET sockets (Linux only)
#
@@ -366,6 +383,12 @@ CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n
#
+# Compile PMD for KASUMI device
+#
+CONFIG_RTE_LIBRTE_PMD_KASUMI=n
+CONFIG_RTE_LIBRTE_PMD_KASUMI_DEBUG=n
+
+#
# Compile PMD for NULL Crypto device
#
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
@@ -390,6 +413,7 @@ CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
#
CONFIG_RTE_LIBRTE_MBUF=y
CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
+CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc"
CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
@@ -512,6 +536,11 @@ CONFIG_RTE_KNI_VHOST_DEBUG_RX=n
CONFIG_RTE_KNI_VHOST_DEBUG_TX=n
#
+# Compile the pdump library
+#
+CONFIG_RTE_LIBRTE_PDUMP=y
+
+#
# Compile vhost library
# fuse-devel is needed to run vhost-cuse.
# fuse-devel enables user space char driver development
@@ -542,6 +571,7 @@ CONFIG_RTE_INSECURE_FUNCTION_WARNING=n
# Compile the test application
#
CONFIG_RTE_APP_TEST=y
+CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
#
# Compile the PMD test application
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 7e698e27..2483dfa5 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -43,3 +43,4 @@ CONFIG_RTE_LIBRTE_VHOST=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
CONFIG_RTE_LIBRTE_POWER=y
+CONFIG_RTE_VIRTIO_USER=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e626ec1e..bde6acd9 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -74,3 +74,4 @@ CONFIG_RTE_LIBRTE_MPIPE_PMD=n
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n
CONFIG_RTE_LIBRTE_PMD_BNX2X=n
+CONFIG_RTE_LIBRTE_QEDE_PMD=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 9abeca4c..1a171266 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -42,7 +42,8 @@ CONFIG_RTE_FORCE_INTRINSICS=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
-CONFIG_RTE_IXGBE_INC_VECTOR=n
+CONFIG_RTE_EAL_IGB_UIO=n
+
CONFIG_RTE_LIBRTE_IVSHMEM=n
CONFIG_RTE_LIBRTE_FM10K_PMD=n
CONFIG_RTE_LIBRTE_I40E_PMD=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
new file mode 100644
index 00000000..66df54c5
--- /dev/null
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -0,0 +1,42 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Freescale Semiconductor nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#include "defconfig_arm64-armv8a-linuxapp-gcc"
+
+# NXP (Freescale) - SoC architecture with WRIOP and QBMAN support
+CONFIG_RTE_MACHINE="dpaa2"
+CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd"
+
+#
+# Compile Environment Abstraction Layer
+#
+CONFIG_RTE_MAX_LCORE=8
+CONFIG_RTE_MAX_NUMA_NODES=1
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index fe5e987a..a5b1e24c 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -34,3 +34,15 @@
CONFIG_RTE_MACHINE="thunderx"
CONFIG_RTE_CACHE_LINE_SIZE=128
+CONFIG_RTE_MAX_NUMA_NODES=2
+CONFIG_RTE_MAX_LCORE=96
+
+#
+# Compile Cavium Thunderx NICVF PMD driver
+#
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=y
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n
diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc
index c32859f7..ba072592 100644
--- a/config/defconfig_i686-native-linuxapp-gcc
+++ b/config/defconfig_i686-native-linuxapp-gcc
@@ -60,3 +60,8 @@ CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
# AES-NI GCM PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
+
+#
+# KASUMI PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_KASUMI=n
diff --git a/config/defconfig_i686-native-linuxapp-icc b/config/defconfig_i686-native-linuxapp-icc
index cde9d960..850e5364 100644
--- a/config/defconfig_i686-native-linuxapp-icc
+++ b/config/defconfig_i686-native-linuxapp-icc
@@ -60,3 +60,8 @@ CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
# AES-NI GCM PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
+
+#
+# KASUMI PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_PMD_KASUMI=n
diff --git a/config/defconfig_ppc_64-power8-linuxapp-gcc b/config/defconfig_ppc_64-power8-linuxapp-gcc
index 9eb0cc42..bef8f49a 100644
--- a/config/defconfig_ppc_64-power8-linuxapp-gcc
+++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
@@ -50,7 +50,7 @@ CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
# Will turn on them only after the successful testing on Power
CONFIG_RTE_LIBRTE_IXGBE_PMD=n
CONFIG_RTE_LIBRTE_I40E_PMD=n
-CONFIG_RTE_LIBRTE_VIRTIO_PMD=n
+CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_ENIC_PMD=n
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index f6263867..5e7f024d 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -118,6 +118,7 @@ There are many libraries, so their headers may be grouped by topics:
[frag] (@ref rte_port_frag.h),
[reass] (@ref rte_port_ras.h),
[sched] (@ref rte_port_sched.h),
+ [kni] (@ref rte_port_kni.h),
[src/sink] (@ref rte_port_source_sink.h)
* [table] (@ref rte_table.h):
[lpm IPv4] (@ref rte_table_lpm.h),
diff --git a/doc/guides/contributing/coding_style.rst b/doc/guides/contributing/coding_style.rst
index ad1392d2..1eb67f34 100644
--- a/doc/guides/contributing/coding_style.rst
+++ b/doc/guides/contributing/coding_style.rst
@@ -685,3 +685,11 @@ Control Statements
usage();
/* NOTREACHED */
}
+
+
+Python Code
+-----------
+
+All Python code should be compliant with `PEP8 (Style Guide for Python Code) <https://www.python.org/dev/peps/pep-0008/>`_.
+
+The ``pep8`` tool can be used for testing compliance with the guidelines.
diff --git a/doc/guides/contributing/versioning.rst b/doc/guides/contributing/versioning.rst
index ae10a984..92b4d7ca 100644
--- a/doc/guides/contributing/versioning.rst
+++ b/doc/guides/contributing/versioning.rst
@@ -475,7 +475,7 @@ Where ``REV1`` and ``REV2`` are valid gitrevisions(7)
https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
on the local repo and target is the usual DPDK compilation target.
-For example:
+For example::
# Check between the previous and latest commit:
./scripts/validate-abi.sh HEAD~1 HEAD x86_64-native-linuxapp-gcc
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 9e04853a..60a89142 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -46,8 +46,11 @@ AESNI MB PMD has support for:
Cipher algorithms:
* RTE_CRYPTO_SYM_CIPHER_AES128_CBC
+* RTE_CRYPTO_SYM_CIPHER_AES192_CBC
* RTE_CRYPTO_SYM_CIPHER_AES256_CBC
-* RTE_CRYPTO_SYM_CIPHER_AES512_CBC
+* RTE_CRYPTO_SYM_CIPHER_AES128_CTR
+* RTE_CRYPTO_SYM_CIPHER_AES192_CTR
+* RTE_CRYPTO_SYM_CIPHER_AES256_CTR
Hash algorithms:
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index a3f11f31..9616de1e 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -38,6 +38,7 @@ Crypto Device Drivers
overview
aesni_mb
aesni_gcm
+ kasumi
null
snow3g
- qat
\ No newline at end of file
+ qat
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
new file mode 100644
index 00000000..d6b3a975
--- /dev/null
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -0,0 +1,101 @@
+.. BSD LICENSE
+ Copyright(c) 2016 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+KASUMI Crypto Poll Mode Driver
+===============================
+
+The KASUMI PMD (**librte_pmd_kasumi**) provides poll mode crypto driver
+support for utilizing Intel Libsso library, which implements F8 and F9 functions
+for KASUMI UEA1 cipher and UIA1 hash algorithms.
+
+Features
+--------
+
+KASUMI PMD has support for:
+
+Cipher algorithm:
+
+* RTE_CRYPTO_SYM_CIPHER_KASUMI_F8
+
+Authentication algorithm:
+
+* RTE_CRYPTO_SYM_AUTH_KASUMI_F9
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+* KASUMI (F9) is supported only if the hash offset field is byte-aligned.
+
+Installation
+------------
+
+To build DPDK with the KASUMI_PMD, the user is required to download
+the export controlled ``libsso_kasumi`` library by requesting it from
+`<https://networkbuilders.intel.com/network-technologies/dpdk>`_.
+Once approval has been granted, the user needs to log in at
+`<https://networkbuilders.intel.com/dpdklogin>`_
+and click on the "Kasumi Bit Stream crypto library" link to download the library.
+After downloading the library, the user needs to unpack and compile it
+on their system before building DPDK::
+
+ make kasumi
+
+Initialization
+--------------
+
+In order to enable this virtual crypto PMD, the user must:
+
+* Export the environmental variable LIBSSO_KASUMI_PATH with the path where
+ the library was extracted (kasumi folder).
+
+* Build the LIBSSO library (explained in Installation section).
+
+* Set CONFIG_RTE_LIBRTE_PMD_KASUMI=y in config/common_base.
+
+To use the PMD in an application, the user must:
+
+* Call rte_eal_vdev_init("cryptodev_kasumi_pmd") within the application.
+
+* Use --vdev="cryptodev_kasumi_pmd" in the EAL options, which will call rte_eal_vdev_init() internally.
+
+The following parameters (all optional) can be provided in the previous two calls:
+
+* socket_id: Specify the socket where the memory for the device is going to be allocated
+  (by default, socket_id will be the socket that the core creating the PMD is running on).
+
+* max_nb_queue_pairs: Specify the maximum number of queue pairs in the device (8 by default).
+
+* max_nb_sessions: Specify the maximum number of sessions that can be created (2048 by default).
+
+Example:
+
+.. code-block:: console
+
+ ./l2fwd-crypto -c 40 -n 4 --vdev="cryptodev_kasumi_pmd,socket_id=1,max_nb_sessions=128"
diff --git a/doc/guides/cryptodevs/overview.rst b/doc/guides/cryptodevs/overview.rst
index 9f9af43c..d612f71b 100644
--- a/doc/guides/cryptodevs/overview.rst
+++ b/doc/guides/cryptodevs/overview.rst
@@ -33,62 +33,63 @@ Crypto Device Supported Functionality Matrices
Supported Feature Flags
.. csv-table::
- :header: "Feature Flags", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g"
+ :header: "Feature Flags", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g", "kasumi"
:stub-columns: 1
- "RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO",x,x,,,
- "RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO",,,,,
- "RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING",x,x,x,x,x
- "RTE_CRYPTODEV_FF_CPU_SSE",,,x,x,x
- "RTE_CRYPTODEV_FF_CPU_AVX",,,x,x,x
- "RTE_CRYPTODEV_FF_CPU_AVX2",,,x,x,
- "RTE_CRYPTODEV_FF_CPU_AESNI",,,x,x,
- "RTE_CRYPTODEV_FF_HW_ACCELERATED",x,,,,
+ "RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO",x,x,x,x,x,x
+ "RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO",,,,,,
+ "RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING",x,x,x,x,x,x
+ "RTE_CRYPTODEV_FF_CPU_SSE",,,x,x,x,x
+ "RTE_CRYPTODEV_FF_CPU_AVX",,,x,x,x,x
+ "RTE_CRYPTODEV_FF_CPU_AVX2",,,x,x,,
+ "RTE_CRYPTODEV_FF_CPU_AESNI",,,x,x,,
+ "RTE_CRYPTODEV_FF_HW_ACCELERATED",x,,,,,
Supported Cipher Algorithms
.. csv-table::
- :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g"
+ :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g", "kasumi"
:stub-columns: 1
- "NULL",,x,,,
- "AES_CBC_128",x,,x,,
- "AES_CBC_192",x,,x,,
- "AES_CBC_256",x,,x,,
- "AES_CTR_128",,,,,
- "AES_CTR_192",,,,,
- "AES_CTR_256",,,,,
- "SNOW3G_UEA2",x,,,,x
+ "NULL",,x,,,,
+ "AES_CBC_128",x,,x,,,
+ "AES_CBC_192",x,,x,,,
+ "AES_CBC_256",x,,x,,,
+ "AES_CTR_128",x,,x,,,
+ "AES_CTR_192",x,,x,,,
+ "AES_CTR_256",x,,x,,,
+ "SNOW3G_UEA2",x,,,,x,
+ "KASUMI_F8",,,,,,x
Supported Authentication Algorithms
.. csv-table::
- :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g"
+ :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g", "kasumi"
:stub-columns: 1
- "NONE",,x,,,
- "MD5",,,,,
- "MD5_HMAC",,,x,,
- "SHA1",,,,,
- "SHA1_HMAC",x,,x,,
- "SHA224",,,,,
- "SHA224_HMAC",,,x,,
- "SHA256",,,,,
- "SHA256_HMAC",x,,x,,
- "SHA384",,,,,
- "SHA384_HMAC",,,x,,
- "SHA512",,,,,
- "SHA512_HMAC",x,,x,,
- "AES_XCBC",x,,x,,
- "SNOW3G_UIA2",x,,,,x
-
+ "NONE",,x,,,,
+ "MD5",,,,,,
+ "MD5_HMAC",,,x,,,
+ "SHA1",,,,,,
+ "SHA1_HMAC",x,,x,,,
+ "SHA224",,,,,,
+ "SHA224_HMAC",,,x,,,
+ "SHA256",,,,,,
+ "SHA256_HMAC",x,,x,,,
+ "SHA384",,,,,,
+ "SHA384_HMAC",,,x,,,
+ "SHA512",,,,,,
+ "SHA512_HMAC",x,,x,,,
+ "AES_XCBC",x,,x,,,
+ "SNOW3G_UIA2",x,,,,x,
+ "KASUMI_F9",,,,,,x
Supported AEAD Algorithms
.. csv-table::
- :header: "AEAD Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g"
+ :header: "AEAD Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g", "kasumi"
:stub-columns: 1
- "AES_GCM_128",x,,x,,
- "AES_GCM_192",x,,,,
- "AES_GCM_256",x,,,,
+ "AES_GCM_128",x,,x,,,
+ "AES_GCM_192",x,,,,,
+ "AES_GCM_256",x,,,,,
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 4b8f782a..cae19582 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -44,6 +44,9 @@ Cipher algorithms:
* ``RTE_CRYPTO_SYM_CIPHER_AES128_CBC``
* ``RTE_CRYPTO_SYM_CIPHER_AES192_CBC``
* ``RTE_CRYPTO_SYM_CIPHER_AES256_CBC``
+* ``RTE_CRYPTO_SYM_CIPHER_AES128_CTR``
+* ``RTE_CRYPTO_SYM_CIPHER_AES192_CTR``
+* ``RTE_CRYPTO_SYM_CIPHER_AES256_CTR``
* ``RTE_CRYPTO_SYM_CIPHER_SNOW3G_UEA2``
* ``RTE_CRYPTO_CIPHER_AES_GCM``
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index c1098b1a..670a62a9 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -51,55 +51,33 @@ Limitations
-----------
* Chained mbufs are not supported.
-* Snow3g(UEA2) supported only if cipher length, cipher offset fields are byte-aligned.
-* Snow3g(UIA2) supported only if hash length, hash offset fields are byte-aligned.
+* Snow3g (UIA2) is supported only if the hash offset field is byte-aligned.
+* In-place bit-level operations for Snow3g (UEA2) are not supported
+  (if the length and/or offset of the data to be ciphered is not byte-aligned).
Installation
------------
-To build DPDK with the SNOW3G_PMD the user is required to download
-the export controlled ``libsso`` library, by requesting it from
-`<https://networkbuilders.intel.com/network-technologies/dpdk>`_,
-and compiling it on their system before building DPDK::
-
- make -f Makefile_snow3g
-
-**Note**: If using a gcc version higher than 5.0, and compilation fails, apply the following patch:
-
-.. code-block:: diff
-
- /libsso/src/snow3g/sso_snow3g.c
-
- static inline void ClockFSM_4(sso_snow3gKeyState4_t *pCtx, __m128i *data)
- {
- __m128i F, R;
- - uint32_t K, L;
- + uint32_t K;
- + /* Declare unused if SNOW3G_WSM/SNB are defined */
- + uint32_t L __attribute__ ((unused)) = 0;
-
- F = _mm_add_epi32(pCtx->LFSR_X[15], pCtx->FSM_X[0]);
- R = _mm_xor_si128(pCtx->LFSR_X[5], pCtx->FSM_X[2]);
-
- /libsso/include/sso_snow3g_internal.h
-
- -inline void ClockFSM_1(sso_snow3gKeyState1_t *pCtx, uint32_t *data);
- -inline void ClockLFSR_1(sso_snow3gKeyState1_t *pCtx);
- -inline void sso_snow3gStateInitialize_1(sso_snow3gKeyState1_t * pCtx, sso_snow3g_key_schedule_t *pKeySched, uint8_t *pIV);
- +void ClockFSM_1(sso_snow3gKeyState1_t *pCtx, uint32_t *data);
- +void ClockLFSR_1(sso_snow3gKeyState1_t *pCtx);
- +void sso_snow3gStateInitialize_1(sso_snow3gKeyState1_t * pCtx, sso_snow3g_key_schedule_t *pKeySched, uint8_t *pIV);
+To build DPDK with the SNOW3G_PMD, the user is required to download
+the export controlled ``libsso_snow3g`` library by requesting it from
+`<https://networkbuilders.intel.com/network-technologies/dpdk>`_.
+Once approval has been granted, the user needs to log in at
+`<https://networkbuilders.intel.com/dpdklogin>`_
+and click on the "Snow3G Bit Stream crypto library" link to download the library.
+After downloading the library, the user needs to unpack and compile it
+on their system before building DPDK::
+ make snow3G
Initialization
--------------
In order to enable this virtual crypto PMD, user must:
-* Export the environmental variable LIBSSO_PATH with the path where
- the library was extracted.
+* Export the environmental variable LIBSSO_SNOW3G_PATH with the path where
+ the library was extracted (snow3g folder).
-* Build the LIBSSO library (explained in Installation section).
+* Build the LIBSSO_SNOW3G library (explained in Installation section).
* Set CONFIG_RTE_LIBRTE_PMD_SNOW3G=y in config/common_base.
diff --git a/doc/guides/faq/faq.rst b/doc/guides/faq/faq.rst
index 368374f5..3228b92c 100644
--- a/doc/guides/faq/faq.rst
+++ b/doc/guides/faq/faq.rst
@@ -88,9 +88,7 @@ the wrong socket, the application simply will not start.
On application startup, there is a lot of EAL information printed. Is there any way to reduce this?
---------------------------------------------------------------------------------------------------
-Yes, each EAL has a configuration file that is located in the /config directory. Within each configuration file, you will find CONFIG_RTE_LOG_LEVEL=8.
-You can change this to a lower value, such as 6 to reduce this printout of debug information. The following is a list of LOG levels that can be found in the rte_log.h file.
-You must remove, then rebuild, the EAL directory for the change to become effective as the configuration file creates the rte_config.h file in the EAL directory.
+Yes, the option ``--log-level=`` accepts one of these numbers:
.. code-block:: c
@@ -103,6 +101,9 @@ You must remove, then rebuild, the EAL directory for the change to become effect
#define RTE_LOG_INFO 7U /* Informational. */
#define RTE_LOG_DEBUG 8U /* Debug-level messages. */
+It is also possible to change the maximum (and default) log level at compile
+time with ``CONFIG_RTE_LOG_LEVEL``.
+
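+The level can also be lowered programmatically. A minimal sketch, using the
+EAL logging API (``rte_set_log_level()`` from ``rte_log.h``):
+
+.. code-block:: c
+
+    #include <rte_log.h>
+
+    /* After rte_eal_init(), keep only messages of level INFO and below. */
+    rte_set_log_level(RTE_LOG_INFO);
+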
How can I tune my network application to achieve lower latency?
---------------------------------------------------------------
diff --git a/doc/guides/freebsd_gsg/build_dpdk.rst b/doc/guides/freebsd_gsg/build_dpdk.rst
index ceacf7f8..1d92c089 100644
--- a/doc/guides/freebsd_gsg/build_dpdk.rst
+++ b/doc/guides/freebsd_gsg/build_dpdk.rst
@@ -263,7 +263,7 @@ To avoid this error, reduce the number of buffers or the buffer size.
Loading the DPDK nic_uio Module
-------------------------------
-After loading the contigmem module, the ``nic_uio must`` also be loaded into the
+After loading the contigmem module, the ``nic_uio`` module must also be loaded into the
running kernel prior to running any DPDK application. This module must
be loaded using the kldload command as shown below (assuming that the current
directory is the DPDK target directory).
diff --git a/doc/guides/linux_gsg/enable_func.rst b/doc/guides/linux_gsg/enable_func.rst
index 076770f2..ec0e04d9 100644
--- a/doc/guides/linux_gsg/enable_func.rst
+++ b/doc/guides/linux_gsg/enable_func.rst
@@ -186,21 +186,6 @@ Check with the local Intel's Network Division application engineers for firmware
The base driver to support firmware version of FVL3E will be integrated in the next
DPDK release, so currently the validated firmware version is 4.2.6.
-Enabling Extended Tag
-~~~~~~~~~~~~~~~~~~~~~
-
-PCI configuration of ``extended_tag`` has big impact on small packet size
-performance of 40G ports. Enabling ``extended_tag`` can help 40G port to
-achieve the best performance, especially for small packet size.
-
-* Disabling/enabling ``extended_tag`` can be done in some BIOS implementations.
-
-* If BIOS does not enable it, and does not support changing it, tools
- (e.g. ``setpci`` on Linux) can be used to enable or disable ``extended_tag``.
-
-* From release 16.04, ``extended_tag`` is enabled by default during port
- initialization, users don't need to care about that anymore.
-
Use 16 Bytes RX Descriptor Size
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/linux_gsg/sys_reqs.rst b/doc/guides/linux_gsg/sys_reqs.rst
index 959709e5..b3215448 100644
--- a/doc/guides/linux_gsg/sys_reqs.rst
+++ b/doc/guides/linux_gsg/sys_reqs.rst
@@ -101,6 +101,9 @@ Compilation of the DPDK
* libpcap headers and libraries (libpcap-devel) to compile and use the libpcap-based poll-mode driver.
This driver is disabled by default and can be enabled by setting ``CONFIG_RTE_LIBRTE_PMD_PCAP=y`` in the build time config file.
+* libarchive headers and library are needed by some unit tests that use tar to retrieve their resources.
+
+
Running DPDK Applications
-------------------------
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
new file mode 100644
index 00000000..2669e984
--- /dev/null
+++ b/doc/guides/nics/bnxt.rst
@@ -0,0 +1,49 @@
+.. BSD LICENSE
+ Copyright 2016 Broadcom Limited
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Broadcom Limited nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+bnxt poll mode driver library
+=============================
+
+The bnxt poll mode library (**librte_pmd_bnxt**) implements support for
+**Broadcom NetXtreme® C-Series**. These adapters support standards-
+compliant 10/25/50Gbps speeds with 30MPPS full-duplex throughput.
+
+Information about this family of adapters can be found in the
+`NetXtreme® Brand section <https://www.broadcom.com/products/ethernet-communication-and-switching?technology%5B%5D=88>`_
+of the `Broadcom web site <http://www.broadcom.com/>`_.
+
+Limitations
+-----------
+
+With the current driver, allocated mbufs must be large enough to hold
+the entire received frame. If the mbufs are not large enough, the
+packets will be dropped. This is most limiting when jumbo frames are
+used.
+
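+As a mitigation, the receive mempool can be sized so that each mbuf's data
+room holds the largest expected frame. A minimal sketch (the pool name and
+sizes below are illustrative assumptions, not driver requirements):
+
+.. code-block:: c
+
+    #include <rte_mbuf.h>
+    #include <rte_lcore.h>
+
+    /* Data room large enough for a 9000 byte jumbo frame plus headroom. */
+    struct rte_mempool *mp = rte_pktmbuf_pool_create("bnxt_rx_pool",
+            8192,                           /* number of mbufs */
+            256,                            /* per-core cache size */
+            0,                              /* application private area */
+            9000 + RTE_PKTMBUF_HEADROOM,    /* data room per mbuf */
+            rte_socket_id());
+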
+SR-IOV is not supported.
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index c4915d82..7fc48624 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -157,10 +157,9 @@ Switch manager
The Intel FM10000 family of NICs integrate a hardware switch and multiple host
interfaces. The FM10000 PMD driver only manages host interfaces. For the
switch component another switch driver has to be loaded prior to the
-FM10000 PMD driver. The switch driver can be acquired for Intel support or
-from the `Match Interface <https://github.com/match-interface>`_ project.
+FM10000 PMD driver. The switch driver can be acquired from Intel support.
Only Testpoint is validated with DPDK; the latest version that has been
-validated with DPDK2.2 is 4.1.6.
+validated with DPDK is 4.1.6.
CRC striping
~~~~~~~~~~~~
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 934eb027..da695afd 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -366,3 +366,48 @@ Delete all flow director rules on a port:
testpmd> flush_flow_director 0
+Floating VEB
+~~~~~~~~~~~~~
+
+The Intel® Ethernet Controller X710 and XL710 Family support a feature called
+"Floating VEB".
+
+A Virtual Ethernet Bridge (VEB) is an IEEE Edge Virtual Bridging (EVB) term
+for functionality that allows local switching between virtual endpoints within
+a physical endpoint and also with an external bridge/network.
+
+A "Floating" VEB doesn't have an uplink connection to the outside world so all
+switching is done internally and remains within the host. As such, this
+feature provides security benefits.
+
+In addition, a Floating VEB overcomes a limitation of normal VEBs where they
+cannot forward packets when the physical link is down. Floating VEBs don't need
+to connect to the NIC port so they can still forward traffic from VF to VF
+even when the physical link is down.
+
+Therefore, with this feature enabled VFs can be limited to communicating with
+each other but not an outside network, and they can do so even when there is
+no physical uplink on the associated NIC port.
+
+To enable this feature, the user should pass a ``devargs`` parameter to the
+EAL, for example::
+
+ -w 84:00.0,enable_floating_veb=1
+
+In this configuration the PMD will use the floating VEB feature for all the
+VFs created by this PF device.
+
+Alternatively, the user can specify which VFs need to connect to this floating
+VEB using the ``floating_veb_list`` argument::
+
+ -w 84:00.0,enable_floating_veb=1,floating_veb_list=1;3-4
+
+In this example ``VF1``, ``VF3`` and ``VF4`` connect to the floating VEB,
+while other VFs connect to the normal VEB.
+
+The current implementation only supports one floating VEB and one regular
+VEB. VFs can connect to a floating VEB or a regular VEB according to the
+configuration passed on the EAL command line.
+
+The floating VEB functionality requires a NIC firmware version of 5.0
+or greater.
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 769f6770..92d56a59 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -37,6 +37,7 @@ Network Interface Controller Drivers
overview
bnx2x
+ bnxt
cxgbe
e1000em
ena
@@ -48,7 +49,9 @@ Network Interface Controller Drivers
mlx4
mlx5
nfp
+ qede
szedata2
+ thunderx
virtio
vhost
vmxnet3
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index b6f91e6a..063c4a54 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -86,11 +86,11 @@ Features
- Hardware checksum offloads.
- Flow director (RTE_FDIR_MODE_PERFECT and RTE_FDIR_MODE_PERFECT_MAC_VLAN).
- Secondary process TX is supported.
+- KVM and VMware ESX SR-IOV modes are supported.
Limitations
-----------
-- KVM and VMware ESX SR-IOV modes are not supported yet.
- Inner RSS for VXLAN frames is not supported yet.
- Port statistics through software counters only.
- Hardware checksum offloads for VXLAN inner header are not supported yet.
@@ -114,23 +114,6 @@ These options can be modified in the ``.config`` file.
adds additional run-time checks and debugging messages at the cost of
lower performance.
-- ``CONFIG_RTE_LIBRTE_MLX5_SGE_WR_N`` (default **4**)
-
- Number of scatter/gather elements (SGEs) per work request (WR). Lowering
- this number improves performance but also limits the ability to receive
- scattered packets (packets that do not fit a single mbuf). The default
- value is a safe tradeoff.
-
-- ``CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE`` (default **0**)
-
- Amount of data to be inlined during TX operations. Improves latency.
- Can improve PPS performance when PCI backpressure is detected and may be
- useful for scenarios involving heavy traffic on many queues.
-
- Since the additional software logic necessary to handle this mode can
- lower performance when there is no backpressure, it is not enabled by
- default.
-
- ``CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE`` (default **8**)
Maximum number of cached memory pools (MPs) per TX queue. Each MP from
@@ -142,16 +125,6 @@ These options can be modified in the ``.config`` file.
Environment variables
~~~~~~~~~~~~~~~~~~~~~
-- ``MLX5_ENABLE_CQE_COMPRESSION``
-
- A nonzero value lets ConnectX-4 return smaller completion entries to
- improve performance when PCI backpressure is detected. It is most useful
- for scenarios involving heavy traffic on many queues.
-
- Since the additional software logic necessary to handle this mode can
- lower performance when there is no backpressure, it is not enabled by
- default.
-
- ``MLX5_PMD_ENABLE_PADDING``
Enables HW packet padding in PCI bus transactions.
@@ -175,6 +148,39 @@ Run-time configuration
- **ethtool** operations on related kernel interfaces also affect the PMD.
+- ``rxq_cqe_comp_en`` parameter [int]
+
+ A nonzero value enables the compression of CQE on the RX side. This feature
+ allows saving PCI bandwidth and improves performance at the cost of a
+ slightly higher CPU usage. Enabled by default.
+
+- ``txq_inline`` parameter [int]
+
+ Amount of data to be inlined during TX operations. Improves latency.
+ Can improve PPS performance when PCI back pressure is detected and may be
+ useful for scenarios involving heavy traffic on many queues.
+
+ It is not enabled by default (set to 0) since the additional software
+ logic necessary to handle this mode can lower performance when back
+ pressure is not expected.
+
+- ``txqs_min_inline`` parameter [int]
+
+ Enable inline send only when the number of TX queues is greater than or
+ equal to this value.
+
+ This option should be used in combination with ``txq_inline`` above.
+
+- ``txq_mpw_en`` parameter [int]
+
+ A nonzero value enables multi-packet send. This feature allows the TX
+ burst function to pack up to five packets in two descriptors in order to
+ save PCI bandwidth and improve performance at the cost of a slightly
+ higher CPU usage.
+
+ It is currently only supported on the ConnectX-4 Lx family of adapters.
+ Enabled by default.
+
Prerequisites
-------------
@@ -228,40 +234,12 @@ DPDK and must be installed separately:
Currently supported by DPDK:
-- Mellanox OFED **3.1-1.0.3**, **3.1-1.5.7.1** or **3.2-2.0.0.0** depending
- on usage.
-
- The following features are supported with version **3.1-1.5.7.1** and
- above only:
-
- - IPv6, UPDv6, TCPv6 RSS.
- - RX checksum offloads.
- - IBM POWER8.
-
- The following features are supported with version **3.2-2.0.0.0** and
- above only:
-
- - Flow director.
- - RX VLAN stripping.
- - TX VLAN insertion.
- - RX CRC stripping configuration.
+- Mellanox OFED **3.3-1.0.0.0**.
- Minimum firmware version:
- With MLNX_OFED **3.1-1.0.3**:
-
- - ConnectX-4: **12.12.1240**
- - ConnectX-4 Lx: **14.12.1100**
-
- With MLNX_OFED **3.1-1.5.7.1**:
-
- - ConnectX-4: **12.13.0144**
- - ConnectX-4 Lx: **14.13.0144**
-
- With MLNX_OFED **3.2-2.0.0.0**:
-
- - ConnectX-4: **12.14.2036**
- - ConnectX-4 Lx: **14.14.2036**
+ - ConnectX-4: **12.16.1006**
+ - ConnectX-4 Lx: **14.16.1006**
Getting Mellanox OFED
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/nics/nfp.rst b/doc/guides/nics/nfp.rst
index dfc36836..e4ebc712 100644
--- a/doc/guides/nics/nfp.rst
+++ b/doc/guides/nics/nfp.rst
@@ -61,9 +61,8 @@ instructions.
DPDK runs in userspace and PMDs use the Linux kernel UIO interface to
allow access to physical devices from userspace. The NFP PMD requires
-a separate UIO driver, **nfp_uio**, to perform correct
-initialization. This driver is part of Netronome´s BSP and it is
-equivalent to Intel's igb_uio driver.
+the **igb_uio** UIO driver, available with DPDK, to perform correct
+initialization.
Building the software
---------------------
@@ -201,27 +200,18 @@ Using the NFP PMD is not different to using other PMDs. Usual steps are:
The module should now be listed by the lsmod command.
-#. **To install the nfp_uio kernel module (manually):** This module supports
- NFP-6xxx devices through the UIO interface.
-
- This module is part of Netronome´s BSP and it should be available when the
- BSP is installed.
+#. **To install the igb_uio kernel module (manually):** This module is part
+ of DPDK sources and configured by default (CONFIG_RTE_EAL_IGB_UIO=y).
.. code-block:: console
- modprobe nfp_uio.ko
+ modprobe igb_uio.ko
The module should now be listed by the lsmod command.
- Depending on which NFP modules are loaded, nfp_uio may be automatically
- bound to the NFP PCI devices by the system. Otherwise the binding needs
- to be done explicitly. This is the case when nfp_netvf, the Linux kernel
- driver for NFP VFs, was loaded when VFs were created. As described later
- in this document this configuration may also be performed using scripts
- provided by the Netronome´s BSP.
-
- First the device needs to be unbound, for example from the nfp_netvf
- driver:
+ Depending on which NFP modules are loaded, it may be necessary to
+ detach NFP devices from the nfp_netvf module. If this is the case, the
+ device needs to be unbound, for example:
.. code-block:: console
@@ -232,30 +222,25 @@ Using the NFP PMD is not different to using other PMDs. Usual steps are:
The output of lspci should now show that 0000:03:08.0 is not bound to
any driver.
- The next step is to add the NFP PCI ID to the NFP UIO driver:
+ The next step is to add the NFP PCI ID to the IGB UIO driver:
.. code-block:: console
- echo 19ee 6003 > /sys/bus/pci/drivers/nfp_uio/new_id
+ echo 19ee 6003 > /sys/bus/pci/drivers/igb_uio/new_id
- And then to bind the device to the nfp_uio driver:
+ And then to bind the device to the igb_uio driver:
.. code-block:: console
- echo 0000:03:08.0 > /sys/bus/pci/drivers/nfp_uio/bind
+ echo 0000:03:08.0 > /sys/bus/pci/drivers/igb_uio/bind
lspci -d19ee: -k
- lspci should show that device bound to nfp_uio driver.
-
-#. **Using tools from Netronome´s BSP to install and bind modules:** DPDK provides
- scripts which are useful for installing the UIO modules and for binding the
- right device to those modules avoiding doing so manually. However, these scripts
- have not support for Netronome´s UIO driver. Along with drivers, the BSP installs
- those DPDK scripts slightly modified with support for Netronome´s UIO driver.
+ lspci should now show that the device is bound to the igb_uio driver.
- Those specific scripts can be found in Netronome´s BSP installation directory.
- Refer to BSP documentation for more information.
+#. **Using scripts to install and bind modules:** DPDK provides scripts which are
+ useful for installing the UIO modules and for binding the right device to those
+ modules, avoiding the need to do so manually:
* **setup.sh**
* **dpdk_nic_bind.py**
diff --git a/doc/guides/nics/overview.rst b/doc/guides/nics/overview.rst
index ed116e31..a23eb5cc 100644
--- a/doc/guides/nics/overview.rst
+++ b/doc/guides/nics/overview.rst
@@ -58,7 +58,7 @@ Most of these differences are summarized below.
white-space: pre-wrap;
text-align: center;
vertical-align: top;
- padding: 3px;
+ padding: 2px;
}
table#id1 th:first-child {
vertical-align: bottom;
@@ -74,76 +74,81 @@ Most of these differences are summarized below.
.. table:: Features availability in networking drivers
- ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
- Feature a b b b c e e e i i i i i i i i i i f f f f m m m n n p r s v v v v x
- f n n o x 1 n n 4 4 4 4 g g x x x x m m m m l l p f u c i z h i i m e
- p x x n g 0 a i 0 0 0 0 b b g g g g 1 1 1 1 x x i p l a n e o r r x n
- a 2 2 d b 0 c e e e e v b b b b 0 0 0 0 4 5 p l p g d s t t n v
- c x x i e 0 . v v f e e e e k k k k e a t i i e i
- k v n . f f . v v . v v t o o t r
- e f g . . . f f . f f a . 3 t
- t v v v v v v 2 v
- e e e e e e e
- c c c c c c c
- ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
- speed capabilities
- link status X X X X X X X X X X X X X X X X X X
- link status event X X X X X X X X X X X
- queue status event X
- Rx interrupt X X X X X X X X X X X X X X X
- queue start/stop X X X X X X X X X X X X X X X X X X
- MTU update X X X X X X X X X X
- jumbo frame X X X X X X X X X X X X X X X X X X X X
- scattered Rx X X X X X X X X X X X X X X X X X X X X X
- LRO X X X X
- TSO X X X X X X X X X X X X X X X X
- promiscuous mode X X X X X X X X X X X X X X X X X X X X
- allmulticast mode X X X X X X X X X X X X X X X X X X X
- unicast MAC filter X X X X X X X X X X X X X X X X X X X X
- multicast MAC filter X X X X X X X X X X X X X
- RSS hash X X X X X X X X X X X X X X X X X X
- RSS key update X X X X X X X X X X X X X X X
- RSS reta update X X X X X X X X X X X X X X X
- VMDq X X X X X X X
- SR-IOV X X X X X X X X X
- DCB X X X X X
- VLAN filter X X X X X X X X X X X X X X X X X X
- ethertype filter X X X X X
- n-tuple filter X X X
- SYN filter X X X
- tunnel filter X X X X
- flexible filter X
- hash filter X X X X
- flow director X X X X X
- flow control X X X X X X X
- rate limitation X X
- traffic mirroring X X X X
- CRC offload X X X X X X X X X X X X X X X
- VLAN offload X X X X X X X X X X X X X X X
- QinQ offload X X X X X X X
- L3 checksum offload X X X X X X X X X X X X X X X X
- L4 checksum offload X X X X X X X X X X X X X X X X
- inner L3 checksum X X X X X X
- inner L4 checksum X X X X X X
- packet type parsing X X X X X X X X X X X X X X
- timesync X X X X X
- basic stats X X X X X X X X X X X X X X X X X X X X X X X X X X X
- extended stats X X X X X X X X X X X X X X X X X
- stats per queue X X X X X X X X X X X X
- EEPROM dump X X X
- registers dump X X X X X X
- multiprocess aware X X X X X X X X X X X X X X X
- BSD nic_uio X X X X X X X X X X X X X X X X X X X
- Linux UIO X X X X X X X X X X X X X X X X X X X X X X
- Linux VFIO X X X X X X X X X X X X X X X X X X X
- other kdrv X X X
- ARMv7 X X X
- ARMv8 X X X
- Power8 X X X
- TILE-Gx X
- x86-32 X X X X X X X X X X X X X X X X X X X X X X X X
- x86-64 X X X X X X X X X X X X X X X X X X X X X X X X X X X
- usage doc X X X X X X X X X
- design doc
- perf doc
- ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
+ ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
+ Feature a b b b b c e e e i i i i i i i i i i f f f f m m m n n p q q r s t v v v v x
+ f n n n o x 1 n n 4 4 4 4 g g x x x x m m m m l l p f u c e e i z h h i i m e
+ p x x x n g 0 a i 0 0 0 0 b b g g g g 1 1 1 1 x x i p l a d d n e u o r r x n
+ a 2 2 t d b 0 c e e e e v b b b b 0 0 0 0 4 5 p l p e e g d n s t t n v
+ c x x i e 0 . v v f e e e e k k k k e v a d t i i e i
+ k v n . f f . v v . v v f t e o o t r
+ e f g . . . f f . f f a r . 3 t
+ t v v v v v v 2 x v
+ e e e e e e e
+ c c c c c c c
+ ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
+ Speed capabilities
+ Link status Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Link status event Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Queue status event Y
+ Rx interrupt Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Queue start/stop Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ MTU update Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Jumbo frame Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Scattered Rx Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ LRO Y Y Y Y
+ TSO Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Promiscuous mode Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Allmulticast mode Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Unicast MAC filter Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Multicast MAC filter Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ RSS hash Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ RSS key update Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ RSS reta update Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ VMDq Y Y Y Y Y Y Y
+ SR-IOV Y Y Y Y Y Y Y Y Y Y Y
+ DCB Y Y Y Y Y
+ VLAN filter Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Ethertype filter Y Y Y Y Y
+ N-tuple filter Y Y Y
+ SYN filter Y Y Y
+ Tunnel filter Y Y Y Y
+ Flexible filter Y
+ Hash filter Y Y Y Y
+ Flow director Y Y Y Y Y
+ Flow control Y Y Y Y Y Y Y Y Y
+ Rate limitation Y Y
+ Traffic mirroring Y Y Y Y
+ CRC offload Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ VLAN offload Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y P
+ QinQ offload Y Y Y Y Y Y Y
+ L3 checksum offload Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ L4 checksum offload Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Inner L3 checksum Y Y Y Y Y Y
+ Inner L4 checksum Y Y Y Y Y Y
+ Packet type parsing Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Timesync Y Y Y Y Y
+ Basic stats Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Extended stats Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Stats per queue Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ EEPROM dump Y Y Y Y
+ Registers dump Y Y Y Y Y Y Y Y
+ Multiprocess aware Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ BSD nic_uio Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Linux UIO Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Linux VFIO Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Other kdrv Y Y Y
+ ARMv7 Y Y Y
+ ARMv8 Y Y Y Y Y Y Y Y
+ Power8 Y Y Y
+ TILE-Gx Y
+ x86-32 Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ x86-64 Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
+ Usage doc Y Y Y Y Y Y Y Y Y Y Y Y
+ Design doc
+ Perf doc
+ ==================== = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
+
+.. Note::
+
+ Features marked with "P" are partially supported. Refer to the appropriate
+ NIC guide in the following sections for details.
diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst
new file mode 100644
index 00000000..f7ca8eb9
--- /dev/null
+++ b/doc/guides/nics/qede.rst
@@ -0,0 +1,314 @@
+.. BSD LICENSE
+ Copyright (c) 2016 QLogic Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of QLogic Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+QEDE Poll Mode Driver
+======================
+
+The QEDE poll mode driver library (**librte_pmd_qede**) implements support
+for the **QLogic FastLinQ QL4xxxx 25G/40G CNA** family of adapters as well
+as their virtual functions (VF) in an SR-IOV context. It is supported on
+several standard Linux distros such as RHEL7.x, SLES12.x and Ubuntu.
+It is compile-tested under FreeBSD OS.
+
+More information can be found at `QLogic Corporation's Website
+<http://www.qlogic.com>`_.
+
+Supported Features
+------------------
+
+- Unicast/Multicast filtering
+- Promiscuous mode
+- Allmulti mode
+- Port hardware statistics
+- Jumbo frames (using single buffer)
+- VLAN offload - Filtering and stripping
+- Stateless checksum offloads (IPv4/TCP/UDP)
+- Multiple Rx/Tx queues (queue-pairs)
+- RSS (with user configurable table/key)
+- TSS
+- Multiple MAC address
+- Default pause flow control
+- SR-IOV VF for 25G/40G modes
+
+Non-supported Features
+----------------------
+
+- Scatter-Gather Rx/Tx frames
+- Unequal number of Rx/Tx queues
+- MTU change (dynamic)
+- SR-IOV PF
+- Tunneling offloads
+- Reload of the PMD after a non-graceful termination
+
+Supported QLogic Adapters
+-------------------------
+
+- QLogic FastLinQ QL4xxxx 25G/40G/100G CNAs.
+
+Prerequisites
+-------------
+
+- Requires firmware version **8.7.x** and management firmware
+ version **8.7.x or higher**. Firmware may be available
+ inbox in certain newer Linux distros under the standard firmware
+ directory, e.g. ``/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin``.
+
+- If the required firmware files are not available then visit
+ `QLogic Driver Download Center <http://driverdownloads.qlogic.com>`_.
+
+- This driver relies on external zlib library (-lz) for uncompressing
+ the firmware file.
+
+Performance note
+~~~~~~~~~~~~~~~~
+
+- For better performance, it is recommended to use RX/TX rings of 4K entries or larger.
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``.config`` file. Please note that
+enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_PMD`` (default **y**)
+
+ Toggle compilation of QEDE PMD driver.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO`` (default **n**)
+
+ Toggle display of generic debugging messages.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_DRIVER`` (default **n**)
+
+ Toggle display of ecore related messages.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX`` (default **n**)
+
+ Toggle display of transmit fast path run-time messages.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX`` (default **n**)
+
+ Toggle display of receive fast path run-time messages.
+
+- ``CONFIG_RTE_LIBRTE_QEDE_FW`` (default **""**)
+
+ Gives the absolute path of the firmware file,
+ e.g. ``"/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin"``.
+ An empty string indicates that the driver will pick up the firmware file
+ from the default location.
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile QEDE PMD for Linux x86_64 gcc target, run the following ``make``
+command::
+
+ cd <DPDK-source-directory>
+ make config T=x86_64-native-linuxapp-gcc install
+
+To compile QEDE PMD for Linux x86_64 clang target, run the following ``make``
+command::
+
+ cd <DPDK-source-directory>
+ make config T=x86_64-native-linuxapp-clang install
+
+To compile QEDE PMD for FreeBSD x86_64 clang target, run the following ``gmake``
+command::
+
+ cd <DPDK-source-directory>
+ gmake config T=x86_64-native-bsdapp-clang install
+
+To compile QEDE PMD for FreeBSD x86_64 gcc target, run the following ``gmake``
+command::
+
+ cd <DPDK-source-directory>
+ gmake config T=x86_64-native-bsdapp-gcc install -Wl,-rpath=\
+ /usr/local/lib/gcc48 CC=gcc48
+
+
+Sample Application Notes
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section demonstrates how to launch ``testpmd`` with QLogic 4xxxx
+devices managed by ``librte_pmd_qede`` in the Linux operating system.
+
+#. Request huge pages:
+
+ .. code-block:: console
+
+      echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+
+#. Load ``igb_uio`` driver:
+
+ .. code-block:: console
+
+ insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+
+#. Bind the QLogic 4xxxx adapters to ``igb_uio`` loaded in the
+ previous step:
+
+ .. code-block:: console
+
+ ./tools/dpdk_nic_bind.py --bind igb_uio 0000:84:00.0 0000:84:00.1 \
+ 0000:84:00.2 0000:84:00.3
+
+#. Start ``testpmd`` with basic parameters:
+ (enable ``CONFIG_RTE_LIBRTE_QEDE_DEBUG_INFO=y`` to view informational messages):
+
+ .. code-block:: console
+
+ testpmd -c 0xff1 -n 4 -- -i --nb-cores=8 --portmask=0xf --rxd=4096 \
+ --txd=4096 --txfreet=4068 --enable-rx-cksum --rxq=4 --txq=4 \
+ --rss-ip --rss-udp
+
+ [...]
+
+ EAL: PCI device 0000:84:00.0 on NUMA socket 1
+ EAL: probe driver: 1077:1634 rte_qede_pmd
+ EAL: Not managed by a supported kernel driver, skipped
+ EAL: PCI device 0000:84:00.1 on NUMA socket 1
+ EAL: probe driver: 1077:1634 rte_qede_pmd
+ EAL: Not managed by a supported kernel driver, skipped
+ EAL: PCI device 0000:88:00.0 on NUMA socket 1
+ EAL: probe driver: 1077:1656 rte_qede_pmd
+ EAL: PCI memory mapped at 0x7f738b200000
+ EAL: PCI memory mapped at 0x7f738b280000
+ EAL: PCI memory mapped at 0x7f738b300000
+ PMD: Chip details : BB1
+ PMD: Driver version : QEDE PMD 8.7.9.0_1.0.0
+ PMD: Firmware version : 8.7.7.0
+ PMD: Management firmware version : 8.7.8.0
+ PMD: Firmware file : /lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_common_dev_init:macaddr \
+ 00:0e:1e:d2:09:9c
+ [...]
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_tx_queue_setup:txq 0 num_desc 4096 \
+ tx_free_thresh 4068 socket 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_tx_queue_setup:txq 1 num_desc 4096 \
+ tx_free_thresh 4068 socket 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_tx_queue_setup:txq 2 num_desc 4096 \
+ tx_free_thresh 4068 socket 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_tx_queue_setup:txq 3 num_desc 4096 \
+ tx_free_thresh 4068 socket 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_rx_queue_setup:rxq 0 num_desc 4096 \
+ rx_buf_size=2148 socket 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_rx_queue_setup:rxq 1 num_desc 4096 \
+ rx_buf_size=2148 socket 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_rx_queue_setup:rxq 2 num_desc 4096 \
+ rx_buf_size=2148 socket 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_rx_queue_setup:rxq 3 num_desc 4096 \
+ rx_buf_size=2148 socket 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_dev_start:port 0
+ [QEDE PMD: (84:00.0:dpdk-port-0)]qede_dev_start:link status: down
+ [...]
+ Checking link statuses...
+ Port 0 Link Up - speed 25000 Mbps - full-duplex
+ Port 1 Link Up - speed 25000 Mbps - full-duplex
+ Port 2 Link Up - speed 25000 Mbps - full-duplex
+ Port 3 Link Up - speed 25000 Mbps - full-duplex
+ Done
+ testpmd>
+
+
+SR-IOV: Prerequisites and Sample Application Notes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section provides instructions to configure SR-IOV with Linux OS.
+
+**Note**: librte_pmd_qede will be used to bind to the SR-IOV VF devices, while the native Linux kernel driver (QEDE) will function as the SR-IOV PF driver.
+
+#. Verify SR-IOV and ARI capability is enabled on the adapter using ``lspci``:
+
+ .. code-block:: console
+
+ lspci -s <slot> -vvv
+
+ Example output:
+
+ .. code-block:: console
+
+ [...]
+ Capabilities: [1b8 v1] Alternative Routing-ID Interpretation (ARI)
+ [...]
+ Capabilities: [1c0 v1] Single Root I/O Virtualization (SR-IOV)
+ [...]
+ Kernel driver in use: igb_uio
+
+#. Load the kernel module:
+
+ .. code-block:: console
+
+ modprobe qede
+
+ Example output:
+
+ .. code-block:: console
+
+ systemd-udevd[4848]: renamed network interface eth0 to ens5f0
+ systemd-udevd[4848]: renamed network interface eth1 to ens5f1
+
+#. Bring up the PF ports:
+
+ .. code-block:: console
+
+ ifconfig ens5f0 up
+ ifconfig ens5f1 up
+
+#. Create VF device(s):
+
+ Echo the number of VFs to be created into the ``sriov_numvfs`` sysfs entry
+ of the parent PF.
+
+ Example:
+
+ .. code-block:: console
+
+ echo 2 > /sys/devices/pci0000:00/0000:00:03.0/0000:81:00.0/sriov_numvfs
+
+
+#. Assign VF MAC address:
+
+ Assign a MAC address to the VF using the iproute2 utility. The syntax is::
+
+ ip link set <PF iface> vf <VF id> mac <macaddr>
+
+ Example:
+
+ .. code-block:: console
+
+ ip link set ens5f0 vf 0 mac 52:54:00:2f:9d:e8
+
+
+#. PCI Passthrough:
+
+ The VF devices may be passed through to the guest VM using ``virt-manager`` or
+ ``virsh``. QEDE PMD should be used to bind the VF devices in the guest VM
+ using the instructions outlined in the Application notes above.
diff --git a/doc/guides/nics/thunderx.rst b/doc/guides/nics/thunderx.rst
new file mode 100644
index 00000000..e38f260b
--- /dev/null
+++ b/doc/guides/nics/thunderx.rst
@@ -0,0 +1,354 @@
+.. BSD LICENSE
+ Copyright (C) Cavium networks Ltd. 2016.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium networks nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ThunderX NICVF Poll Mode Driver
+===============================
+
+The ThunderX NICVF PMD (**librte_pmd_thunderx_nicvf**) provides poll mode driver
+support for the inbuilt NIC found in the **Cavium ThunderX** SoC family
+as well as their virtual functions (VF) in SR-IOV context.
+
+More information can be found at `Cavium Networks Official Website
+<http://www.cavium.com/ThunderX_ARM_Processors.html>`_.
+
+Features
+--------
+
+Features of the ThunderX PMD are:
+
+- Multiple queues for TX and RX
+- Receive Side Scaling (RSS)
+- Packet type information
+- Checksum offload
+- Promiscuous mode
+- Multicast mode
+- Port hardware statistics
+- Jumbo frames
+- Link state information
+- Scatter/gather for TX and RX
+- VLAN stripping
+- SR-IOV VF
+- NUMA support
+
+Supported ThunderX SoCs
+-----------------------
+- CN88xx
+
+Prerequisites
+-------------
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to set up the basic DPDK environment.
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``.config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD`` (default ``n``)
+
+ Toggle compilation of the ``librte_pmd_thunderx_nicvf`` driver.
+ By default it is enabled only for the defconfig_arm64-thunderx-* config.
+
+- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT`` (default ``n``)
+
+ Toggle display of initialization related messages.
+
+- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX`` (default ``n``)
+
+ Toggle display of receive fast path run-time messages.
+
+- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX`` (default ``n``)
+
+ Toggle display of transmit fast path run-time messages.
+
+- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER`` (default ``n``)
+
+ Toggle display of generic debugging messages.
+
+- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX`` (default ``n``)
+
+ Toggle display of PF mailbox-related run-time check messages.
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile the ThunderX NICVF PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-thunderx-linuxapp-gcc install
+
+Linux
+-----
+
+.. _thunderx_testpmd_example:
+
+Running testpmd
+~~~~~~~~~~~~~~~
+
+This section demonstrates how to launch ``testpmd`` with ThunderX NIC VF device
+managed by ``librte_pmd_thunderx_nicvf`` in the Linux operating system.
+
+#. Load ``vfio-pci`` driver:
+
+ .. code-block:: console
+
+ modprobe vfio-pci
+
+ .. _thunderx_vfio_noiommu:
+
+#. Enable **VFIO-NOIOMMU** mode (optional):
+
+ .. code-block:: console
+
+ echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
+
+ .. note::
+
+ **VFIO-NOIOMMU** is required only when running in VM context and should not be enabled otherwise.
+ See also :ref:`SR-IOV: Prerequisites and sample Application Notes <thunderx_sriov_example>`.
+
+#. Bind the ThunderX NIC VF device to ``vfio-pci`` loaded in the previous step:
+
+ Setup VFIO permissions for regular users and then bind to ``vfio-pci``:
+
+ .. code-block:: console
+
+ ./tools/dpdk_nic_bind.py --bind vfio-pci 0002:01:00.2
+
+#. Start ``testpmd`` with basic parameters:
+
+ .. code-block:: console
+
+ ./arm64-thunderx-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0002:01:00.2 \
+ -- -i --disable-hw-vlan-filter --crc-strip --no-flush-rx \
+ --port-topology=loop
+
+ Example output:
+
+ .. code-block:: console
+
+ ...
+
+ PMD: rte_nicvf_pmd_init(): librte_pmd_thunderx nicvf version 1.0
+
+ ...
+ EAL: probe driver: 177d:11 rte_nicvf_pmd
+ EAL: using IOMMU type 1 (Type 1)
+ EAL: PCI memory mapped at 0x3ffade50000
+ EAL: Trying to map BAR 4 that contains the MSI-X table.
+ Trying offsets: 0x40000000000:0x0000, 0x10000:0x1f0000
+ EAL: PCI memory mapped at 0x3ffadc60000
+ PMD: nicvf_eth_dev_init(): nicvf: device (177d:11) 2:1:0:2
+ PMD: nicvf_eth_dev_init(): node=0 vf=1 mode=tns-bypass sqs=false
+ loopback_supported=true
+ PMD: nicvf_eth_dev_init(): Port 0 (177d:11) mac=a6:c6:d9:17:78:01
+ Interactive-mode selected
+ Configuring Port 0 (socket 0)
+ ...
+
+ PMD: nicvf_dev_configure(): Configured ethdev port0 hwcap=0x0
+ Port 0: A6:C6:D9:17:78:01
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
+
+.. _thunderx_sriov_example:
+
+SR-IOV: Prerequisites and sample Application Notes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The current ThunderX NIC PF/VF kernel modules map each physical Ethernet port
+automatically to a virtual function (VF) and present it as a PCIe-like SR-IOV device.
+This section provides instructions to configure SR-IOV with Linux OS.
+
+#. Verify PF devices capabilities using ``lspci``:
+
+ .. code-block:: console
+
+ lspci -vvv
+
+ Example output:
+
+ .. code-block:: console
+
+ 0002:01:00.0 Ethernet controller: Cavium Networks Device a01e (rev 01)
+ ...
+ Capabilities: [100 v1] Alternative Routing-ID Interpretation (ARI)
+ ...
+ Capabilities: [180 v1] Single Root I/O Virtualization (SR-IOV)
+ ...
+ Kernel driver in use: thunder-nic
+ ...
+
+ .. note::
+
+ Unless ``thunder-nic`` driver is in use make sure your kernel config includes ``CONFIG_THUNDER_NIC_PF`` setting.
+
+#. Verify VF devices capabilities and drivers using ``lspci``:
+
+ .. code-block:: console
+
+ lspci -vvv
+
+ Example output:
+
+ .. code-block:: console
+
+ 0002:01:00.1 Ethernet controller: Cavium Networks Device 0011 (rev 01)
+ ...
+ Capabilities: [100 v1] Alternative Routing-ID Interpretation (ARI)
+ ...
+ Kernel driver in use: thunder-nicvf
+ ...
+
+ 0002:01:00.2 Ethernet controller: Cavium Networks Device 0011 (rev 01)
+ ...
+ Capabilities: [100 v1] Alternative Routing-ID Interpretation (ARI)
+ ...
+ Kernel driver in use: thunder-nicvf
+ ...
+
+ .. note::
+
+ Unless ``thunder-nicvf`` driver is in use make sure your kernel config includes ``CONFIG_THUNDER_NIC_VF`` setting.
+
+#. Verify PF/VF bind using ``dpdk_nic_bind.py``:
+
+ .. code-block:: console
+
+ ./tools/dpdk_nic_bind.py --status
+
+ Example output:
+
+ .. code-block:: console
+
+ ...
+ 0002:01:00.0 'Device a01e' if= drv=thunder-nic unused=vfio-pci
+ 0002:01:00.1 'Device 0011' if=eth0 drv=thunder-nicvf unused=vfio-pci
+ 0002:01:00.2 'Device 0011' if=eth1 drv=thunder-nicvf unused=vfio-pci
+ ...
+
+#. Load ``vfio-pci`` driver:
+
+ .. code-block:: console
+
+ modprobe vfio-pci
+
+#. Bind VF devices to ``vfio-pci`` using ``dpdk_nic_bind.py``:
+
+ .. code-block:: console
+
+ ./tools/dpdk_nic_bind.py --bind vfio-pci 0002:01:00.1
+ ./tools/dpdk_nic_bind.py --bind vfio-pci 0002:01:00.2
+
+#. Verify VF bind using ``dpdk_nic_bind.py``:
+
+ .. code-block:: console
+
+ ./tools/dpdk_nic_bind.py --status
+
+ Example output:
+
+ .. code-block:: console
+
+ ...
+ 0002:01:00.1 'Device 0011' drv=vfio-pci unused=
+ 0002:01:00.2 'Device 0011' drv=vfio-pci unused=
+ ...
+ 0002:01:00.0 'Device a01e' if= drv=thunder-nic unused=vfio-pci
+ ...
+
+#. Pass VF device to VM context (PCIe Passthrough):
+
+ The VF devices may be passed through to the guest VM using qemu or
+ virt-manager or virsh etc.
+ ``librte_pmd_thunderx_nicvf`` or ``thunder-nicvf`` should be used to bind
+ the VF devices in the guest VM in :ref:`VFIO-NOIOMMU <thunderx_vfio_noiommu>` mode.
+
+ Example qemu guest launch command:
+
+ .. code-block:: console
+
+ sudo qemu-system-aarch64 -name vm1 \
+ -machine virt,gic_version=3,accel=kvm,usb=off \
+ -cpu host -m 4096 \
+ -smp 4,sockets=1,cores=8,threads=1 \
+ -nographic -nodefaults \
+ -kernel <kernel image> \
+ -append "root=/dev/vda console=ttyAMA0 rw hugepagesz=512M hugepages=3" \
+ -device vfio-pci,host=0002:01:00.1 \
+ -drive file=<rootfs.ext3>,if=none,id=disk1,format=raw \
+ -device virtio-blk-device,scsi=off,drive=disk1,id=virtio-disk1,bootindex=1 \
+ -netdev tap,id=net0,ifname=tap0,script=/etc/qemu-ifup_thunder \
+ -device virtio-net-device,netdev=net0 \
+ -serial stdio \
+ -mem-path /dev/huge
+
+#. Refer to section :ref:`Running testpmd <thunderx_testpmd_example>` for
+ instructions on how to launch the ``testpmd`` application.
+
+Limitations
+-----------
+
+CRC striping
+~~~~~~~~~~~~
+
+The ThunderX SoC family NICs strip the CRC of every packet coming into the
+host interface. So, the CRC will be stripped even when the
+``rxmode.hw_strip_crc`` member is set to 0 in ``struct rte_eth_conf``.
+
+Maximum packet length
+~~~~~~~~~~~~~~~~~~~~~
+
+The ThunderX SoC family NICs support a maximum frame size of 9K (jumbo
+frames). The value is fixed and cannot be changed. So, even when the
+``rxmode.max_rx_pkt_len`` member of ``struct rte_eth_conf`` is set to a value
+lower than 9200, frames up to 9200 bytes can still reach the host interface.
+
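+Both of the above caveats concern ``rxmode`` fields of ``struct rte_eth_conf``.
+A minimal sketch of a port configuration illustrating them (the values are
+illustrative assumptions only):
+
+.. code-block:: c
+
+    #include <rte_ethdev.h>
+
+    static const struct rte_eth_conf port_conf = {
+        .rxmode = {
+            .jumbo_frame    = 1,
+            .max_rx_pkt_len = 9200, /* the fixed 9K hardware limit applies anyway */
+            .hw_strip_crc   = 0,    /* ignored: this NIC always strips the CRC */
+        },
+    };
+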
+Maximum packet segments
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The ThunderX SoC family NICs support up to 12 segments per packet when working
+in scatter/gather mode. So, setting the MTU will result in ``EINVAL`` when the
+frame size does not fit in the maximum number of segments.
+
+Limited VFs
+~~~~~~~~~~~
+
+The ThunderX SoC family NICs have 128 VFs, and each VF has 8/8 queues
+for RX/TX respectively. The current driver implementation has a one-to-one
+mapping between physical port and VF, hence only a limited number of VFs can
+be used.
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 4737dc2e..4b9895e4 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -322,8 +322,8 @@ Known Issues
The rte_mempool uses a per-lcore cache inside the mempool.
For non-EAL pthreads, ``rte_lcore_id()`` will not return a valid number.
- So for now, when rte_mempool is used with non-EAL pthreads, the put/get operations will bypass the mempool cache and there is a performance penalty because of this bypass.
- Support for non-EAL mempool cache is currently being enabled.
+ So for now, when rte_mempool is used with non-EAL pthreads, the put/get operations will bypass the default mempool cache and there is a performance penalty because of this bypass.
+ Only user-owned external caches can be used in a non-EAL context, in conjunction with ``rte_mempool_generic_put()`` and ``rte_mempool_generic_get()``, which accept an explicit cache parameter.
+
rte_ring
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index b862d0cf..07a4d354 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -52,6 +52,7 @@ Programmer's Guide
packet_distrib_lib
reorder_lib
ip_fragment_reassembly_lib
+ pdump_lib
multi_proc_support
kernel_nic_interface
thread_safety_dpdk_functions
diff --git a/doc/guides/prog_guide/ivshmem_lib.rst b/doc/guides/prog_guide/ivshmem_lib.rst
index 9401ccf2..b8a32e4c 100644
--- a/doc/guides/prog_guide/ivshmem_lib.rst
+++ b/doc/guides/prog_guide/ivshmem_lib.rst
@@ -79,6 +79,8 @@ The following is a simple guide to using the IVSHMEM Library API:
Only data structures fully residing in DPDK hugepage memory work correctly.
Supported data structures created by malloc(), mmap()
or otherwise using non-DPDK memory cause undefined behavior and even a segmentation fault.
+ Specifically, because the memzone field in an rte_ring refers to a memzone structure residing in local memory,
+ accessing the memzone field in a shared rte_ring will cause an immediate segmentation fault.
IVSHMEM Environment Configuration
---------------------------------
diff --git a/doc/guides/prog_guide/mempool_lib.rst b/doc/guides/prog_guide/mempool_lib.rst
index 5fae79ab..59466752 100644
--- a/doc/guides/prog_guide/mempool_lib.rst
+++ b/doc/guides/prog_guide/mempool_lib.rst
@@ -34,13 +34,12 @@ Mempool Library
===============
A memory pool is an allocator of fixed-size objects.
-In the DPDK, it is identified by name and uses a ring to store free objects.
+In the DPDK, it is identified by name and uses a mempool handler to store free objects.
+The default mempool handler is ring-based.
It provides some other optional services such as a per-core object cache and
an alignment helper to ensure that objects are padded to spread them equally on all DRAM or DDR3 channels.
-This library is used by the
-:ref:`Mbuf Library <Mbuf_Library>` and the
-:ref:`Environment Abstraction Layer <Environment_Abstraction_Layer>` (for logging history).
+This library is used by the :ref:`Mbuf Library <Mbuf_Library>`.
Cookies
-------
@@ -116,7 +115,7 @@ While this may mean a number of buffers may sit idle on some core's cache,
the speed at which a core can access its own cache for a specific memory pool without locks provides performance gains.
The cache is composed of a small, per-core table of pointers and its length (used as a stack).
-This cache can be enabled or disabled at creation of the pool.
+This internal cache can be enabled or disabled at creation of the pool.
The maximum size of the cache is static and is defined at compilation time (CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE).
@@ -128,6 +127,39 @@ The maximum size of the cache is static and is defined at compilation time (CONF
A mempool in Memory with its Associated Ring
+As an alternative to the internal default per-lcore local cache, an application can create and manage external caches through the ``rte_mempool_cache_create()``, ``rte_mempool_cache_free()`` and ``rte_mempool_cache_flush()`` calls.
+These user-owned caches can be explicitly passed to ``rte_mempool_generic_put()`` and ``rte_mempool_generic_get()``.
+The ``rte_mempool_default_cache()`` call returns the default internal cache, if any.
+In contrast to the default caches, user-owned caches can be used by non-EAL threads too, as the sketch below shows.
+
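+A minimal sketch of a user-owned cache in a non-EAL thread (``mp`` is assumed
+to be an existing mempool; the trailing ``0`` is the flags argument of the
+generic calls):
+
+.. code-block:: c
+
+    #include <rte_mempool.h>
+
+    struct rte_mempool_cache *cache;
+    void *objs[32];
+
+    /* Create a cache owned by this (possibly non-EAL) thread. */
+    cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
+
+    if (rte_mempool_generic_get(mp, objs, 32, cache, 0) == 0) {
+        /* ... use the 32 objects ... */
+        rte_mempool_generic_put(mp, objs, 32, cache, 0);
+    }
+
+    /* Return any cached objects to the pool before freeing the cache. */
+    rte_mempool_cache_flush(cache, mp);
+    rte_mempool_cache_free(cache);
+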
+Mempool Handlers
+----------------
+
+The mempool handler feature allows external memory subsystems, such as
+hardware memory management systems and software-based memory allocators,
+to be used with DPDK.
+
+There are two aspects to a mempool handler.
+
+* Adding the code for your new mempool operations (ops). This is achieved by
+ adding a new mempool ops code, and using the ``MEMPOOL_REGISTER_OPS`` macro.
+
+* Using the new API to call ``rte_mempool_create_empty()`` and
+ ``rte_mempool_set_ops_byname()`` to create a new mempool and specify which
+ ops to use.
+
+Several different mempool handlers may be used in the same application. A new
+mempool can be created by using the ``rte_mempool_create_empty()`` function,
+then using ``rte_mempool_set_ops_byname()`` to point the mempool to the
+relevant mempool handler callback (ops) structure.
+
+Legacy applications may continue to use the old ``rte_mempool_create()`` API
+call, which uses a ring-based mempool handler by default. These applications
+will need to be modified if they are to use a different mempool handler.
+
+For applications that use ``rte_pktmbuf_pool_create()``, there is a config setting
+(``RTE_MBUF_DEFAULT_MEMPOOL_OPS``) that allows the application to make use of
+an alternative mempool handler.
+
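+A minimal sketch of the two-step creation described above, attaching the
+default ring-based ops by name (the pool name and sizes are illustrative
+assumptions):
+
+.. code-block:: c
+
+    #include <rte_mempool.h>
+    #include <rte_lcore.h>
+
+    struct rte_mempool *mp;
+
+    /* Step 1: create an empty pool of 4096 objects of 2048 bytes each. */
+    mp = rte_mempool_create_empty("sketch_pool", 4096, 2048,
+                                  256, 0, rte_socket_id(), 0);
+
+    /* Step 2: select the handler ops, then populate the pool with memory. */
+    if (mp != NULL &&
+        rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) == 0)
+        rte_mempool_populate_default(mp);
+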
Use Cases
---------
diff --git a/doc/guides/prog_guide/pdump_lib.rst b/doc/guides/prog_guide/pdump_lib.rst
new file mode 100644
index 00000000..580ffcbd
--- /dev/null
+++ b/doc/guides/prog_guide/pdump_lib.rst
@@ -0,0 +1,123 @@
+.. BSD LICENSE
+ Copyright(c) 2016 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _pdump_library:
+
+The librte_pdump Library
+========================
+
+The ``librte_pdump`` library provides a framework for packet capturing in DPDK.
+The library makes a complete copy of the Rx and Tx mbufs to a new mempool and
+hence slows down the performance of the application, so it is recommended
+to use this library only for debugging purposes.
+
+The library provides the following APIs to initialize the packet capture framework, to enable
+or disable the packet capture, and to uninitialize it:
+
+* ``rte_pdump_init()``:
+ This API initializes the packet capture framework.
+
+* ``rte_pdump_enable()``:
+ This API enables the packet capture on a given port and queue.
+ Note: The filter option in the API is a place holder for future enhancements.
+
+* ``rte_pdump_enable_by_deviceid()``:
+ This API enables the packet capture on a given device id (``vdev name or pci address``) and queue.
+ Note: The filter option in the API is a place holder for future enhancements.
+
+* ``rte_pdump_disable()``:
+ This API disables the packet capture on a given port and queue.
+
+* ``rte_pdump_disable_by_deviceid()``:
+ This API disables the packet capture on a given device id (``vdev name or pci address``) and queue.
+
+* ``rte_pdump_uninit()``:
+ This API uninitializes the packet capture framework.
+
+* ``rte_pdump_set_socket_dir()``:
+ This API sets the server and client socket paths.
+ Note: This API is not thread-safe.
+
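+A minimal sketch of the enable/disable sequence follows. The exact signatures
+are abbreviated assumptions here; ``ring`` and ``mp`` are a previously created
+``rte_ring`` and ``rte_mempool`` into which captured packets are mirrored:
+
+.. code-block:: c
+
+    #include <rte_pdump.h>
+
+    rte_pdump_init(NULL);   /* NULL selects the default socket path */
+
+    /* Capture both directions of port 0, queue 0. */
+    rte_pdump_enable(0, 0, RTE_PDUMP_FLAG_RXTX, ring, mp, NULL);
+
+    /* ... captured mbufs are enqueued to "ring" while enabled ... */
+
+    rte_pdump_disable(0, 0, RTE_PDUMP_FLAG_RXTX);
+    rte_pdump_uninit();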
+
+Operation
+---------
+
+The ``librte_pdump`` library works on a client/server model. The server is responsible for enabling or
+disabling the packet capture and the clients are responsible for requesting the enabling or disabling of
+the packet capture.
+
+The packet capture framework, as part of its initialization, creates a pthread and the server socket in
+that pthread. The application that calls the framework initialization will have the server socket created,
+either under the path that the application has passed or under the default path, i.e. ``/var/run`` for
+the root user or ``$HOME`` for non-root users.
+
+Applications that request enabling or disabling of the packet capture will have the client socket created, either under
+the path that the application has passed or under the default path, i.e. ``/var/run/`` for the root user or ``$HOME``
+for non-root users, to send the requests to the server.
+The server socket will listen for client requests for enabling or disabling the packet capture.
+
+
+Implementation Details
+----------------------
+
+The library API ``rte_pdump_init()`` initializes the packet capture framework by creating the pthread and the server
+socket. The server socket in the pthread context will be listening for client requests to enable or disable the
+packet capture.
+
+The library APIs ``rte_pdump_enable()`` and ``rte_pdump_enable_by_deviceid()`` enable the packet capture.
+On each call to these APIs, the library creates a separate client socket, creates the "pdump enable" request and sends
+the request to the server. The server that is listening on the socket will take the request and enable the packet capture
+by registering the Ethernet RX and TX callbacks for the given port or device_id and queue combinations.
+Then the server will mirror the packets to the new mempool and enqueue them to the rte_ring that clients have passed
+to these APIs. The server also sends the response back to the client about the status of the request that was processed.
+After the response is received from the server, the client socket is closed.
+
+The library APIs ``rte_pdump_disable()`` and ``rte_pdump_disable_by_deviceid()`` disable the packet capture.
+On each call to these APIs, the library creates a separate client socket, creates the "pdump disable" request and sends
+the request to the server. The server that is listening on the socket will take the request and disable the packet
+capture by removing the Ethernet RX and TX callbacks for the given port or device_id and queue combinations. The server
+also sends the response back to the client about the status of the request that was processed. After the response is
+received from the server, the client socket is closed.
+
+The library API ``rte_pdump_uninit()`` uninitializes the packet capture framework by closing the pthread and the
+server socket.
+
+The library API ``rte_pdump_set_socket_dir()`` sets the given path as either the server socket path
+or the client socket path, based on the ``type`` argument of the API.
+If the given path is ``NULL``, the default path will be selected, i.e. ``/var/run/`` for the root user or ``$HOME``
+for non-root users. Clients also need to call this API to set their server socket path if the server socket
+path is different from the default path.
+
+
+Use Case: Packet Capturing
+--------------------------
+
+The DPDK ``app/pdump`` tool is built on this library to capture packets in DPDK.
+Users can use it as an example to develop their own packet capturing tools.
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index 76986924..bf3ea9fd 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -299,10 +299,23 @@ Extended Statistics API
~~~~~~~~~~~~~~~~~~~~~~~
The extended statistics API allows each individual PMD to expose a unique set
-of statistics. The client of the API provides an array of
-``struct rte_eth_xstats`` type. Each ``struct rte_eth_xstats`` contains a
-string and value pair. The amount of xstats exposed, and position of the
-statistic in the array must remain constant during runtime.
+of statistics. Accessing these from application programs is done via two
+functions:
+
+* ``rte_eth_xstats_get``: Fills in an array of ``struct rte_eth_xstat``
+ with extended statistics.
+* ``rte_eth_xstats_get_names``: Fills in an array of
+ ``struct rte_eth_xstat_name`` with extended statistic name lookup
+ information.
+
+Each ``struct rte_eth_xstat`` contains an identifier and value pair, and
+each ``struct rte_eth_xstat_name`` contains a string. Each identifier
+within the ``struct rte_eth_xstat`` lookup array must have a corresponding
+entry in the ``struct rte_eth_xstat_name`` lookup array. Within the latter,
+the index of the entry is the identifier with which the string is associated.
+These identifiers, as well as the number of extended statistics exposed, must
+remain constant during runtime. Note that extended statistic identifiers are
+driver-specific, and hence might not be the same for different ports.
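+
+A sketch of this two-call pattern (error handling abbreviated; per the 16.07
+ethdev headers, calling ``rte_eth_xstats_get_names`` with a too-small array
+returns the required number of entries):
+
+.. code-block:: c
+
+    #include <inttypes.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <rte_ethdev.h>
+
+    static void
+    print_xstats(uint8_t port_id)
+    {
+        int i, len = rte_eth_xstats_get_names(port_id, NULL, 0);
+        if (len <= 0)
+            return;
+
+        struct rte_eth_xstat_name *names = malloc(sizeof(*names) * len);
+        struct rte_eth_xstat *values = malloc(sizeof(*values) * len);
+
+        if (rte_eth_xstats_get_names(port_id, names, len) == len &&
+                rte_eth_xstats_get(port_id, values, len) == len) {
+            for (i = 0; i < len; i++)
+                /* values[i].id indexes the name lookup array. */
+                printf("%s: %" PRIu64 "\n",
+                       names[values[i].id].name, values[i].value);
+        }
+        free(names);
+        free(values);
+    }
+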
A naming scheme exists for the strings exposed to clients of the API. This is
to allow scraping of the API for statistics of interest. The naming scheme uses
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index 48e1fffb..14d5e675 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,105 +31,194 @@
Vhost Library
=============
-The vhost library implements a user space vhost driver. It supports both vhost-cuse
-(cuse: user space character device) and vhost-user(user space socket server).
-It also creates, manages and destroys vhost devices for corresponding virtio
-devices in the guest. Vhost supported vSwitch could register callbacks to this
-library, which will be called when a vhost device is activated or deactivated
-by guest virtual machine.
+The vhost library implements a user space virtio net server allowing the user
+to manipulate the virtio ring directly. In other words, it allows the user
+to fetch/put packets from/to the VM virtio net device. To achieve this, a
+vhost library should be able to:
+
+* Access the guest memory:
+
+ For QEMU, this is done by using the ``-object memory-backend-file,share=on,...``
+ option, which means QEMU will create a file to serve as the guest RAM.
+ The ``share=on`` option allows another process to map that file, which
+ means it can access the guest RAM.
+
+* Know all the necessary information about the vring:
+
+ Information such as where the available ring is stored. Vhost defines some
+ messages to tell the backend all the information it needs in order to
+ manipulate the vring.
+
+Currently, there are two ways to pass these messages and as a result there are
+two Vhost implementations in DPDK: *vhost-cuse* (where the character devices
+are in user space) and *vhost-user*.
+
+Vhost-cuse creates a user space character device and hooks its ioctl function,
+so that all ioctl commands that are sent from the frontend (QEMU) will be
+captured and handled.
+
+Vhost-user creates a Unix domain socket file through which messages are
+passed.
+
+.. Note::
+
+ Since DPDK v2.2, the majority of the development effort has gone into
+ enhancing vhost-user, with features such as multiple queues, live migration, and
+ reconnect. Thus, it is strongly advised to use vhost-user instead of
+ vhost-cuse.
+
Vhost API Overview
------------------
-* Vhost driver registration
+The following is an overview of the Vhost API functions (a condensed usage
+sketch follows the list):
+
+* ``rte_vhost_driver_register(path, flags)``
+
+ This function registers a vhost driver into the system. For vhost-cuse, a
+ ``/dev/path`` character device file will be created. For vhost-user server
+ mode, a Unix domain socket file ``path`` will be created.
+
+ Currently two flags are supported (these are valid for vhost-user only):
+
+ - ``RTE_VHOST_USER_CLIENT``
+
+ DPDK vhost-user will act as the client when this flag is given. See below
+ for an explanation.
+
+ - ``RTE_VHOST_USER_NO_RECONNECT``
+
+ When DPDK vhost-user acts as the client, it will keep trying to reconnect
+ to the server (QEMU) until it succeeds. This is useful in two cases:
+
+ * When QEMU is not started yet.
+ * When QEMU restarts (for example due to a guest OS reboot).
+
+ This reconnect option is enabled by default. However, it can be turned off
+ by setting this flag.
- rte_vhost_driver_register registers the vhost driver into the system.
- For vhost-cuse, character device file will be created under the /dev directory.
- Character device name is specified as the parameter.
- For vhost-user, a Unix domain socket server will be created with the parameter as
- the local socket path.
+* ``rte_vhost_driver_session_start()``
-* Vhost session start
+ This function starts the vhost session loop to handle vhost messages. It
+ starts an infinite loop, therefore it should be called in a dedicated
+ thread.
- rte_vhost_driver_session_start starts the vhost session loop.
- Vhost session is an infinite blocking loop.
- Put the session in a dedicate DPDK thread.
+* ``rte_vhost_driver_callback_register(virtio_net_device_ops)``
-* Callback register
+ This function registers a set of callbacks, to let DPDK applications take
+ the appropriate action when some events happen. The following events are
+ currently supported:
- Vhost supported vSwitch could call rte_vhost_driver_callback_register to
- register two callbacks, new_destory and destroy_device.
- When virtio device is activated or deactivated by guest virtual machine,
- the callback will be called, then vSwitch could put the device onto data
- core or remove the device from data core by setting or unsetting
- VIRTIO_DEV_RUNNING on the device flags.
+ * ``new_device(int vid)``
-* Read/write packets from/to guest virtual machine
+ This callback is invoked when a virtio net device becomes ready. ``vid``
+ is the virtio net device ID.
- rte_vhost_enqueue_burst transmit host packets to guest.
- rte_vhost_dequeue_burst receives packets from guest.
+ * ``destroy_device(int vid)``
-* Feature enable/disable
+ This callback is invoked when a virtio net device shuts down (or when the
+ vhost connection is broken).
- Now one negotiate-able feature in vhost is merge-able.
- vSwitch could enable/disable this feature for performance consideration.
+ * ``vring_state_changed(int vid, uint16_t queue_id, int enable)``
-Vhost Implementation
---------------------
+ This callback is invoked when a specific queue's state is changed, for
+ example when it is enabled or disabled.
-Vhost cuse implementation
+* ``rte_vhost_enqueue_burst(vid, queue_id, pkts, count)``
+
+ Transmits (enqueues) ``count`` packets from host to guest.
+
+* ``rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool, pkts, count)``
+
+ Receives (dequeues) ``count`` packets from the guest, and stores them in ``pkts``.
+
+* ``rte_vhost_feature_disable/rte_vhost_feature_enable(feature_mask)``
+
+ This function disables/enables some features. For example, it can be used to
+ disable the mergeable buffers and TSO features, which are both enabled by
+ default.
+
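+A condensed sketch of this call sequence, for vhost-user server mode, might
+look as follows (the socket path and burst size are illustrative, and
+``mbuf_pool`` is a placeholder for an existing pktmbuf pool; prototypes follow
+the 16.07 ``rte_virtio_net.h``):
+
+.. code-block:: c
+
+    #include <rte_eal.h>
+    #include <rte_mbuf.h>
+    #include <rte_virtio_net.h>
+
+    static int
+    new_device(int vid)
+    {
+        /* Device "vid" is ready: put it on the datapath. */
+        return 0;
+    }
+
+    static void
+    destroy_device(int vid)
+    {
+        /* Device "vid" is gone: remove it from the datapath. */
+    }
+
+    static const struct virtio_net_device_ops ops = {
+        .new_device = new_device,
+        .destroy_device = destroy_device,
+    };
+
+    /* Datapath helper, run from a worker core once new_device()
+     * has fired: fetch a burst from the guest and send it back. */
+    static void
+    echo_burst(int vid, struct rte_mempool *mbuf_pool)
+    {
+        struct rte_mbuf *pkts[32];
+        uint16_t n = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ,
+                mbuf_pool, pkts, 32);
+        rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts, n);
+    }
+
+    int
+    main(int argc, char **argv)
+    {
+        rte_eal_init(argc, argv);
+        rte_vhost_driver_register("/tmp/sock0", 0);
+        rte_vhost_driver_callback_register(&ops);
+        /* Infinite loop: run it in a dedicated thread. */
+        rte_vhost_driver_session_start();
+        return 0;
+    }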
+
+Vhost Implementations
+---------------------
+
+Vhost-cuse implementation
~~~~~~~~~~~~~~~~~~~~~~~~~
+
When vSwitch registers the vhost driver, it will register a cuse device driver
into the system and creates a character device file. This cuse driver will
-receive vhost open/release/IOCTL message from QEMU simulator.
+receive vhost open/release/IOCTL messages from the QEMU simulator.
-When the open call is received, vhost driver will create a vhost device for the
-virtio device in the guest.
+When the open call is received, the vhost driver will create a vhost device
+for the virtio device in the guest.
-When VHOST_SET_MEM_TABLE IOCTL is received, vhost searches the memory region
-to find the starting user space virtual address that maps the memory of guest
-virtual machine. Through this virtual address and the QEMU pid, vhost could
-find the file QEMU uses to map the guest memory. Vhost maps this file into its
-address space, in this way vhost could fully access the guest physical memory,
-which means vhost could access the shared virtio ring and the guest physical
-address specified in the entry of the ring.
+When the ``VHOST_SET_MEM_TABLE`` ioctl is received, vhost searches the memory
+region to find the starting user space virtual address that maps the memory of
+the guest virtual machine. Through this virtual address and the QEMU pid,
+vhost can find the file QEMU uses to map the guest memory. Vhost maps this
+file into its address space; in this way vhost can fully access the guest
+physical memory, which means vhost can access the shared virtio ring and the
+guest physical addresses specified in the entries of the ring.
The guest virtual machine tells the vhost whether the virtio device is ready
-for processing or is de-activated through VHOST_NET_SET_BACKEND message.
-The registered callback from vSwitch will be called.
+for processing or is de-activated through the ``VHOST_NET_SET_BACKEND``
+message. The registered callback from vSwitch will be called.
-When the release call is released, vhost will destroy the device.
+When the release call is made, vhost will destroy the device.
-Vhost user implementation
+Vhost-user implementation
~~~~~~~~~~~~~~~~~~~~~~~~~
-When vSwitch registers a vhost driver, it will create a Unix domain socket server
-into the system. This server will listen for a connection and process the vhost message from
-QEMU simulator.
-When there is a new socket connection, it means a new virtio device has been created in
-the guest virtual machine, and the vhost driver will create a vhost device for this virtio device.
+Vhost-user uses Unix domain sockets for passing messages. This means the DPDK
+vhost-user implementation has two options:
+
+* DPDK vhost-user acts as the server.
+
+ DPDK will create a Unix domain socket server file and listen for
+ connections from the frontend.
+
+ Note, this is the default mode, and the only mode before DPDK v16.07.
+
+
+* DPDK vhost-user acts as the client.
+
+ Unlike the server mode, this mode doesn't create the socket file;
+ it just tries to connect to the server (which is responsible for creating
+ the file instead).
+
+ When the DPDK vhost-user application restarts, DPDK vhost-user will try to
+ connect to the server again. This is how the "reconnect" feature works.
+
+ Note: the "reconnect" feature requires **QEMU v2.7** (or above).
+
+No matter which mode is used, once a connection is established, DPDK
+vhost-user will start receiving and processing vhost messages from QEMU.
+
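+For example, the mode, and whether reconnect is wanted, are chosen at
+registration time (the socket path is illustrative):
+
+.. code-block:: c
+
+    /* Client mode; reconnect is on by default. */
+    rte_vhost_driver_register("/tmp/sock0", RTE_VHOST_USER_CLIENT);
+
+    /* Client mode with reconnect turned off. */
+    rte_vhost_driver_register("/tmp/sock0",
+            RTE_VHOST_USER_CLIENT | RTE_VHOST_USER_NO_RECONNECT);
+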
+For messages with a file descriptor, the file descriptor can be used directly
+in the vhost process as it is already installed by the Unix domain socket.
-For messages with a file descriptor, the file descriptor could be directly used in the vhost
-process as it is already installed by Unix domain socket.
+The supported vhost messages are:
- * VHOST_SET_MEM_TABLE
- * VHOST_SET_VRING_KICK
- * VHOST_SET_VRING_CALL
- * VHOST_SET_LOG_FD
- * VHOST_SET_VRING_ERR
+* ``VHOST_SET_MEM_TABLE``
+* ``VHOST_SET_VRING_KICK``
+* ``VHOST_SET_VRING_CALL``
+* ``VHOST_SET_LOG_FD``
+* ``VHOST_SET_VRING_ERR``
-For VHOST_SET_MEM_TABLE message, QEMU will send us information for each memory region and its
-file descriptor in the ancillary data of the message. The fd is used to map that region.
+For the ``VHOST_SET_MEM_TABLE`` message, QEMU will send information for each
+memory region and its file descriptor in the ancillary data of the message.
+The file descriptor is used to map that region.
-There is no VHOST_NET_SET_BACKEND message as in vhost cuse to signal us whether virtio device
-is ready or should be stopped.
-VHOST_SET_VRING_KICK is used as the signal to put the vhost device onto data plane.
-VHOST_GET_VRING_BASE is used as the signal to remove vhost device from data plane.
+There is no ``VHOST_NET_SET_BACKEND`` message as in vhost-cuse to signal
+whether the virtio device is ready or stopped. Instead,
+``VHOST_SET_VRING_KICK`` is used as the signal to put the vhost device into
+the data plane, and ``VHOST_GET_VRING_BASE`` is used as the signal to remove
+the vhost device from the data plane.
When the socket connection is closed, vhost will destroy the device.
Vhost supported vSwitch reference
---------------------------------
-For more vhost details and how to support vhost in vSwitch, please refer to vhost example in the
-DPDK Sample Applications Guide.
+For more vhost details and how to support vhost in vSwitch, please refer to
+the vhost example in the DPDK Sample Applications Guide.
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 327fc2bf..f502f863 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -8,6 +8,9 @@ API and ABI deprecation notices are to be posted here.
Deprecation Notices
-------------------
+* The log history is deprecated.
+ It is voided in 16.07 and will be removed in release 16.11.
+
* The ethdev hotplug API is going to be moved to EAL with a notification
mechanism added to crypto and ethdev libraries so that hotplug is now
available to both of them. This API will be stripped of the device arguments
@@ -20,73 +23,21 @@ Deprecation Notices
do not need to care about the kind of devices that are being used, making it
easier to add new buses later.
-* The EAL function pci_config_space_set is deprecated in release 16.04
- and will be removed from 16.07.
- Macros CONFIG_RTE_PCI_CONFIG, CONFIG_RTE_PCI_EXTENDED_TAG and
- CONFIG_RTE_PCI_MAX_READ_REQUEST_SIZE will be removed.
- The /sys entries extended_tag and max_read_request_size created by igb_uio
- will be removed.
-
-* ABI changes are planned for struct rte_pci_id, i.e., add new field ``class``.
- This new added ``class`` field can be used to probe pci device by class
- related info. This change should impact size of struct rte_pci_id and struct
- rte_pci_device. The release 16.04 does not contain these ABI changes, but
- release 16.07 will.
-
-* The following fields have been deprecated in rte_eth_stats:
- ibadcrc, ibadlen, imcasts, fdirmatch, fdirmiss,
- tx_pause_xon, rx_pause_xon, tx_pause_xoff, rx_pause_xoff
-
-* The xstats API and rte_eth_xstats struct will be changed to allow retrieval
- of values without any string copies or parsing.
- No backwards compatibility is planned, as it would require code duplication
- in every PMD that supports xstats.
-
* ABI changes are planned for adding four new flow types. This impacts
RTE_ETH_FLOW_MAX. The release 2.2 does not contain these ABI changes,
but release 2.3 will. [postponed]
-* ABI change is planned for the rte_mempool structure to allow mempool
- cache support to be dynamic depending on the mempool being created
- needing cache support. Saves about 1.5M of memory per rte_mempool structure
- by removing the per lcore cache memory. Change will occur in DPDK 16.07
- release and will skip the define RTE_NEXT_ABI in DPDK 16.04 release. The
- code affected is app/test/test_mempool.c and librte_mempool/rte_mempool.[ch].
- The rte_mempool.local_cache will be converted from an array to a pointer to
- allow for dynamic allocation of the per lcore cache memory.
-
-* ABI will change for rte_mempool struct to move the cache-related fields
- to the more appropriate rte_mempool_cache struct. The mempool API is
- also changed to enable external cache management that is not tied to EAL
- threads. Some mempool get and put calls are removed in favor of a more
- compact API. The ones that remain are backwards compatible and use the
- per-lcore default cache if available. This change targets release 16.07.
-
-* The rte_mempool struct will be changed in 16.07 to facilitate the new
- external mempool manager functionality.
- The ring element will be replaced with a more generic 'pool' opaque pointer
- to allow new mempool handlers to use their own user-defined mempool
- layout. Also newly added to rte_mempool is a handler index.
- The existing API will be backward compatible, but there will be new API
- functions added to facilitate the creation of mempools using an external
- handler. The 16.07 release will contain these changes.
-
-* The rte_mempool allocation will be changed in 16.07:
- allocation of large mempool in several virtual memory chunks, new API
- to populate a mempool, new API to free a mempool, allocation in
- anonymous mapping, drop of specific dom0 code. These changes will
- induce a modification of the rte_mempool structure, plus a
- modification of the API of rte_mempool_obj_iter(), implying a breakage
- of the ABI.
+* The mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT are deprecated and
+ are respectively replaced by PKT_RX_VLAN_STRIPPED and
+ PKT_RX_QINQ_STRIPPED, which are better described. The old flags and
+ their behavior will be kept in 16.07 and will be removed in 16.11.
-* ABI changes are planned for struct rte_port_source_params in order to
- support PCAP file reading feature. The release 16.04 contains this ABI
- change wrapped by RTE_NEXT_ABI macro. Release 16.07 will contain this
- change, and no backwards compatibility is planned.
+* The APIs ``rte_mempool_count`` and ``rte_mempool_free_count`` are being
+ deprecated because they are confusing to use: ``rte_mempool_free_count``
+ actually returns the number of allocated entries, not the number of free
+ entries as expected. They are replaced by ``rte_mempool_avail_count`` and
+ ``rte_mempool_in_use_count`` respectively (see the sketch at the end of
+ this section).
-* A librte_vhost public structures refactor is planned for DPDK 16.07
- that requires both ABI and API change.
- The proposed refactor would expose DPDK vhost dev to applications as
- a handle, like the way kernel exposes an fd to user for locating a
- specific file, and to keep all major structures internally, so that
- we are likely to be free from ABI violations in future.
+* The mempool functions for single/multi producer/consumer are deprecated and
+ will be removed in 16.11.
+ They are replaced by the ``rte_mempool_generic_get/put`` functions, as
+ shown in the sketch below.
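+
+As a sketch of the replacements, assuming the 16.07 prototypes (``mp``,
+``objs``, ``n`` and ``cache`` are placeholders for an existing mempool, an
+object table, a count and an optional per-thread cache):
+
+.. code-block:: c
+
+    /* Counting: the new names say what they mean. */
+    unsigned int avail = rte_mempool_avail_count(mp);
+    unsigned int in_use = rte_mempool_in_use_count(mp);
+
+    /* One generic call with an explicit cache (may be NULL)
+     * replaces the sp/mp put and sc/mc get variants. */
+    rte_mempool_generic_put(mp, objs, n, cache, 0);
+    rte_mempool_generic_get(mp, objs, n, cache, 0);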
diff --git a/doc/guides/rel_notes/index.rst b/doc/guides/rel_notes/index.rst
index 84317b81..52c63b40 100644
--- a/doc/guides/rel_notes/index.rst
+++ b/doc/guides/rel_notes/index.rst
@@ -36,6 +36,7 @@ Release Notes
:numbered:
rel_description
+ release_16_07
release_16_04
release_2_2
release_2_1
diff --git a/doc/guides/rel_notes/known_issues.rst b/doc/guides/rel_notes/known_issues.rst
index 923a202d..5ec19876 100644
--- a/doc/guides/rel_notes/known_issues.rst
+++ b/doc/guides/rel_notes/known_issues.rst
@@ -532,25 +532,6 @@ Cannot set link speed on Intel® 40G Ethernet controller
Poll Mode Driver (PMD).
-Stopping the port does not down the link on Intel® 40G Ethernet controller
---------------------------------------------------------------------------
-
-**Description**:
- On Intel® 40G Ethernet Controller stopping the port does not really down the port link.
-
-**Implication**:
- The port link will be still up after stopping the port.
-
-**Resolution/Workaround**:
- None
-
-**Affected Environment/Platform**:
- All.
-
-**Driver/Module**:
- Poll Mode Driver (PMD).
-
-
Devices bound to igb_uio with VT-d enabled do not work on Linux kernel 3.15-3.17
--------------------------------------------------------------------------------
@@ -618,3 +599,24 @@ DPDK may not build on some Intel CPUs using clang < 3.7.0
**Driver/Module**:
Environment Abstraction Layer (EAL).
+
+
+The last EAL argument is replaced by the program name in argv[]
+---------------------------------------------------------------
+
+**Description**:
+ The last EAL argument is replaced by the program name in ``argv[]`` after ``eal_parse_args`` is called.
+ This is the intended behavior, but it causes the pointer to the last EAL argument to be lost.
+
+**Implication**:
+ If the last EAL argument in ``argv[]`` is generated by a malloc function, changing it will cause memory
+ issues when freeing the argument.
+
+**Resolution/Workaround**:
+ An application should not assume that the pointers in ``argv[]`` remain
+ unchanged; a workaround sketch follows this entry.
+
+**Affected Environment/Platform**:
+ All.
+
+**Driver/Module**:
+ Environment Abstraction Layer (EAL).
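+
+A workaround sketch for this entry, for the case where all arguments were
+heap-allocated: save the original pointers before calling ``rte_eal_init``
+and free the memory through the saved copies (``argv_saved`` is a
+hypothetical name):
+
+.. code-block:: c
+
+    #include <stdlib.h>
+    #include <rte_eal.h>
+
+    char *argv_saved[argc];
+    int i, ret;
+
+    /* Save the original pointers before EAL parses the arguments. */
+    for (i = 0; i < argc; i++)
+        argv_saved[i] = argv[i];
+
+    ret = rte_eal_init(argc, argv);
+
+    /* Free through the saved pointers; argv[] entries may have been
+     * replaced and must not be passed to free(). */
+    for (i = 0; i < argc; i++)
+        free(argv_saved[i]);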
diff --git a/doc/guides/rel_notes/release_16_07.rst b/doc/guides/rel_notes/release_16_07.rst
new file mode 100644
index 00000000..569f562f
--- /dev/null
+++ b/doc/guides/rel_notes/release_16_07.rst
@@ -0,0 +1,358 @@
+DPDK Release 16.07
+==================
+
+.. **Read this first.**
+
+ The text below explains how to update the release notes.
+
+ Use proper spelling, capitalization and punctuation in all sections.
+
+ Variable and config names should be quoted as fixed width text: ``LIKE_THIS``.
+
+ Build the docs and view the output file to ensure the changes are correct::
+
+ make doc-guides-html
+
+ firefox build/doc/html/guides/rel_notes/release_16_07.html
+
+
+New Features
+------------
+
+.. This section should contain new features added in this release. Sample format:
+
+ * **Add a title in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description in the past tense. The description
+ should be enough to allow someone scanning the release notes to understand
+ the new feature.
+
+ If the feature adds a lot of sub-features you can use a bullet list like this.
+
+ * Added feature foo to do something.
+ * Enhanced feature bar to do something else.
+
+ Refer to the previous release notes for examples.
+
+* **Removed mempool cache if not needed.**
+
+ The size of the mempool structure is reduced if the per-lcore cache is disabled.
+
+* **Added mempool external cache for non-EAL thread.**
+
+ Added new functions to create, free or flush a user-owned mempool
+ cache for non-EAL threads. Previously, the cache was always disabled
+ on these threads. A minimal sketch follows.
+
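+ As a sketch, assuming the 16.07 prototypes (``mp`` is a placeholder for an
+ existing mempool):
+
+ .. code-block:: c
+
+     struct rte_mempool_cache *cache;
+     void *objs[32];
+
+     /* Create a private cache usable from a non-EAL thread. */
+     cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
+
+     rte_mempool_generic_get(mp, objs, 32, cache, 0);
+     /* ... use the objects ... */
+     rte_mempool_generic_put(mp, objs, 32, cache, 0);
+
+     /* Return any cached objects to the pool, then free the cache. */
+     rte_mempool_cache_flush(cache, mp);
+     rte_mempool_cache_free(cache);
+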
+* **Changed the memory allocation in mempool library.**
+
+ * Added ability to allocate a large mempool in virtually fragmented memory.
+ * Added new APIs to populate a mempool with memory.
+ * Added an API to free a mempool.
+ * Modified the API of rte_mempool_obj_iter() function.
+ * Dropped specific Xen Dom0 code.
+ * Dropped specific anonymous mempool code in testpmd.
+
+* **Added new driver for Broadcom NetXtreme-C devices.**
+
+ Added the new bnxt driver for Broadcom NetXtreme-C devices. See the
+ "Network Interface Controller Drivers" document for more details on this
+ new driver.
+
+* **Added new driver for ThunderX nicvf device.**
+
+* **Added mailbox interrupt support for ixgbe and igb VFs.**
+
+ When the physical NIC link comes down or up, the PF driver will send a
+ mailbox message to notify each VF. To handle this link up/down event,
+ mailbox interrupt support was added to receive the message and to allow
+ the application to register a callback for it.
+
+* **Updated the ixgbe base driver.**
+
+ The ixgbe base driver was updated with changes including the
+ following:
+
+ * Added sgmii link for X550.
+ * Added mac link setup for X550a SFP and SFP+.
+ * Added KR support for X550em_a.
+ * Added new phy definitions for M88E1500.
+ * Added support for the VLVF to be bypassed when adding/removing a VFTA entry.
+ * Added X550a flow control auto negotiation support.
+
+* **Updated the i40e base driver.**
+
+ Updated the i40e base driver, which includes support for new device IDs.
+
+* **Supported virtio on IBM POWER8.**
+
+ The ioports are mapped in memory when using Linux UIO.
+
+* **Added virtio support for containers.**
+
+ Added a new virtual device, named virtio-user, to support virtio for containers.
+
+ Known limitations:
+
+ * Control queue and multi-queue are not supported yet.
+ * Cannot work with ``--huge-unlink``.
+ * Cannot work with ``--no-huge``.
+ * Cannot work when there are more than ``VHOST_MEMORY_MAX_NREGIONS`` (8) hugepages.
+ * Root privilege is required for sorting hugepages by physical address.
+ * Can only be used with the vhost-user backend.
+
+* **Added vhost-user client mode.**
+
+ DPDK vhost-user can now be the client as well as the server. Previously it
+ supported server mode only. Client mode is enabled when the
+ ``RTE_VHOST_USER_CLIENT`` flag is set while calling
+ ``rte_vhost_driver_register``.
+
+ When DPDK vhost-user restarts after a normal or abnormal exit (such as a
+ crash), client mode allows DPDK to establish the connection again. Note
+ that a recent QEMU version (v2.7 or above) is needed, otherwise the
+ reconnect won't work.
+
+ DPDK vhost-user will also try to reconnect by default when:
+
+ * the first connection attempt fails (e.g. when QEMU is not started yet);
+ * the connection is broken (e.g. when QEMU restarts).
+
+ It can be turned off by setting the ``RTE_VHOST_USER_NO_RECONNECT`` flag.
+
+* **Added NSH packet recognition in i40e.**
+
+* **Added AES-CTR support to AESNI MB PMD.**
+
+ The AESNI MB PMD now supports 128/192/256-bit counter mode AES encryption
+ and decryption.
+
+* **Added support of AES counter mode for Intel QuickAssist devices.**
+
+ Enabled support for the AES CTR algorithm for Intel QuickAssist devices.
+ Provided support for algorithm-chaining operations.
+
+* **Added KASUMI SW PMD.**
+
+ A new Crypto PMD has been added, which provides KASUMI F8 (UEA1) ciphering
+ and KASUMI F9 (UIA1) hashing.
+
+* **Added multi-writer support for RTE Hash with Intel TSX.**
+
+ The following features/modifications have been added to the rte_hash library
+ (a minimal creation sketch follows the list):
+
+ * Enabled application developers to use an extra flag at rte_hash creation
+ time to specify the default behavior (multi-thread safe/unsafe) of the
+ rte_hash_add_key function.
+ * Changed the Cuckoo search algorithm to breadth-first search for the
+ multi-writer routine, and split the Cuckoo search and move operations in
+ order to reduce the transactional code region and improve TSX performance.
+ * Added a hash multi-writer test case to the test app.
+
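+ As a minimal creation sketch (parameter values are illustrative; the flag
+ name follows the 16.07 ``rte_hash.h``):
+
+ .. code-block:: c
+
+     #include <rte_hash.h>
+
+     struct rte_hash_parameters params = {
+         .name = "mw_hash",
+         .entries = 1024,
+         .key_len = 16,
+         .socket_id = 0,
+         /* Opt in to the TSX-assisted multi-writer add path. */
+         .extra_flag = RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD,
+     };
+     struct rte_hash *h = rte_hash_create(&params);
+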
+* **Improved IP Pipeline Application.**
+
+ The following features have been added to ip_pipeline application:
+
+ * Configure the MAC address in the routing pipeline, with automatic route
+ updates on link state changes.
+ * Enable RSS per network interface through the configuration file.
+ * Streamline the CLI code.
+
+* **Added keepalive enhancements.**
+
+ Added support for reporting core states other than dead to
+ monitoring applications, enabling broader liveness reporting to
+ external processes.
+
+* **Added packet capture framework.**
+
+ * A new library ``librte_pdump`` is added to provide packet capture API.
+ * A new ``app/pdump`` tool is added to capture packets in DPDK.
+
+
+* **Added floating VEB support for i40e PF driver.**
+
+ A "floating VEB" is a special Virtual Ethernet Bridge (VEB) which does not
+ have an upload port, but instead is used for switching traffic between
+ virtual functions (VFs) on a port.
+
+ For information on this feature, please see the "I40E Poll Mode Driver"
+ section of the "Network Interface Controller Drivers" document.
+
+
+Resolved Issues
+---------------
+
+.. This section should contain bug fixes added to the relevant sections. Sample format:
+
+ * **code/section Fixed issue in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description of the resolved issue in the past tense.
+ The title should contain the code/lib section like a commit message.
+ Add the entries in alphabetic order in the relevant sections below.
+
+
+EAL
+~~~
+
+
+Drivers
+~~~~~~~
+
+* **i40e: Fixed vlan stripping from inner header.**
+
+ Previously, for tunnel packets such as VXLAN/NVGRE, the VLAN tags of the
+ inner header were stripped without putting the VLAN info into the
+ descriptor. This issue is fixed by disabling VLAN stripping from the
+ inner header.
+
+* **i40e: Fixed the type issue of a single VLAN type.**
+
+ Previously, if a single VLAN header was added to a packet, it was treated
+ as the inner VLAN, whereas generally a single VLAN header should be
+ treated as the outer VLAN header.
+ This issue is fixed by changing the corresponding register for a single VLAN.
+
+
+Libraries
+~~~~~~~~~
+
+* **mbuf: Fixed refcnt update when detaching.**
+
+ Fixed the ``rte_pktmbuf_detach()`` function to decrement the direct
+ mbuf's reference counter. The previous behavior was not to affect
+ the reference counter, which led to a memory leak of the direct mbuf.
+
+
+Examples
+~~~~~~~~
+
+
+Other
+~~~~~
+
+
+Known Issues
+------------
+
+.. This section should contain new known issues in this release. Sample format:
+
+ * **Add title in present tense with full stop.**
+
+ Add a short 1-2 sentence description of the known issue in the present
+ tense. Add information on any known workarounds.
+
+
+API Changes
+-----------
+
+.. This section should contain API changes. Sample format:
+
+ * Add a short 1-2 sentence description of the API change. Use fixed width
+ quotes for ``rte_function_names`` or ``rte_struct_names``. Use the past tense.
+
+* The following counters are removed from ``rte_eth_stats`` structure:
+ ibadcrc, ibadlen, imcasts, fdirmatch, fdirmiss,
+ tx_pause_xon, rx_pause_xon, tx_pause_xoff, rx_pause_xoff.
+
+* The extended statistics are now fetched by ID with ``rte_eth_xstats_get``,
+ after a name lookup with ``rte_eth_xstats_get_names``.
+
+* The function ``rte_eth_dev_info_get`` fills the new fields ``nb_rx_queues``
+ and ``nb_tx_queues`` in the ``rte_eth_dev_info`` structure.
+
+* The vhost function ``rte_vring_available_entries`` is renamed to
+ ``rte_vhost_avail_entries``.
+
+* All existing vhost APIs and callbacks with a ``virtio_net`` struct pointer
+ as the parameter have been changed due to the ABI refactoring mentioned
+ below: the pointer is replaced by ``int vid``.
+
+* The function ``rte_vhost_enqueue_burst`` no longer supports concurrently
+ enqueuing packets to the same queue.
+
+* The function ``rte_eth_dev_set_mtu`` adds a new return value ``-EBUSY``, which
+ indicates the operation is forbidden because the port is running.
+
+
+ABI Changes
+-----------
+
+.. * Add a short 1-2 sentence description of the ABI change that was announced in
+ the previous releases and made in this release. Use fixed width quotes for
+ ``rte_function_names`` or ``rte_struct_names``. Use the past tense.
+
+* The ``rte_port_source_params`` structure has new fields to support PCAP file.
+ It was already in release 16.04 with ``RTE_NEXT_ABI`` flag.
+
+* The ``rte_eth_dev_info`` structure has new fields ``nb_rx_queues`` and ``nb_tx_queues``
+ to support number of queues configured by software.
+
+* A vhost ABI refactoring has been made: the ``virtio_net`` structure is no
+ longer exported to applications. Instead, a handle, ``vid``, is used to
+ represent this structure internally.
+
+
+Shared Library Versions
+-----------------------
+
+.. Update any library version updated in this release and prepend with a ``+`` sign.
+
+The libraries prepended with a plus sign were incremented in this version.
+
+.. code-block:: diff
+
+ + libethdev.so.4
+ librte_acl.so.2
+ librte_cfgfile.so.2
+ librte_cmdline.so.2
+ librte_distributor.so.1
+ librte_eal.so.2
+ librte_hash.so.2
+ librte_ip_frag.so.1
+ librte_ivshmem.so.1
+ librte_jobstats.so.1
+ librte_kni.so.2
+ librte_kvargs.so.1
+ librte_lpm.so.2
+ librte_mbuf.so.2
+ + librte_mempool.so.2
+ librte_meter.so.1
+ librte_pipeline.so.3
+ librte_pmd_bond.so.1
+ librte_pmd_ring.so.2
+ + librte_port.so.3
+ librte_power.so.1
+ librte_reorder.so.1
+ librte_ring.so.1
+ librte_sched.so.1
+ librte_table.so.2
+ librte_timer.so.1
+ + librte_vhost.so.3
+
+
+Tested Platforms
+----------------
+
+.. This section should contain a list of platforms that were tested with this
+ release.
+
+ The format is:
+
+ #. Platform name.
+
+ - Platform details.
+ - Platform details.
+
+
+Tested NICs
+-----------
+
+.. This section should contain a list of NICs that were tested with this release.
+
+ The format is:
+
+ #. NIC name.
+
+ - NIC details.
+ - NIC details.
diff --git a/doc/guides/sample_app_ug/img/ipsec_endpoints.svg b/doc/guides/sample_app_ug/img/ipsec_endpoints.svg
new file mode 100644
index 00000000..e4aba4cc
--- /dev/null
+++ b/doc/guides/sample_app_ug/img/ipsec_endpoints.svg
@@ -0,0 +1,850 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="155.68507mm"
+ height="76.061203mm"
+ viewBox="0 0 551.64003 269.50821"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="endpoints.svg">
+ <defs
+ id="defs4">
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker18451"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path18453"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker18273"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ inkscape:connector-curvature="0"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path18275" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker10612"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Send">
+ <path
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#cfce37;fill-opacity:1;fill-rule:evenodd;stroke:#cfce37;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path10614"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker10270"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lstart">
+ <path
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path10272"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Sstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker9102"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path9104"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#035002;fill-opacity:1;fill-rule:evenodd;stroke:#035002;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(0.2,0,0,0.2,1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Sstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Sstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5945"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#d4ce00;fill-opacity:1;fill-rule:evenodd;stroke:#d4ce00;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(0.2,0,0,0.2,1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <linearGradient
+ id="linearGradient11398"
+ osb:paint="gradient">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop11400" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="1"
+ id="stop11402" />
+ </linearGradient>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker9540"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path9542"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <linearGradient
+ id="linearGradient9336"
+ osb:paint="solid">
+ <stop
+ style="stop-color:#dc181a;stop-opacity:1;"
+ offset="0"
+ id="stop9338" />
+ </linearGradient>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7350"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7352"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path4871"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7092"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path7094"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker5822"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path5824"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker5369"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path5371"
+ style="fill:#035002;fill-opacity:1;fill-rule:evenodd;stroke:#035002;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutL"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutL"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5013"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="scale(0.8,0.8)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Tail"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Tail"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <g
+ id="g4907"
+ transform="scale(-1.2,-1.2)"
+ style="fill:#528ac6;fill-opacity:1;stroke:#000000;stroke-opacity:1">
+ <path
+ id="path4909"
+ d="M -3.8048674,-3.9585227 0.54352094,0"
+ style="fill:#528ac6;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001;stroke-linecap:round;stroke-opacity:1"
+ inkscape:connector-curvature="0" />
+ <path
+ id="path4911"
+ d="M -1.2866832,-3.9585227 3.0617053,0"
+ style="fill:#528ac6;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001;stroke-linecap:round;stroke-opacity:1"
+ inkscape:connector-curvature="0" />
+ <path
+ id="path4913"
+ d="M 1.3053582,-3.9585227 5.6537466,0"
+ style="fill:#528ac6;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001;stroke-linecap:round;stroke-opacity:1"
+ inkscape:connector-curvature="0" />
+ <path
+ id="path4915"
+ d="M -3.8048674,4.1775838 0.54352094,0.21974226"
+ style="fill:#528ac6;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001;stroke-linecap:round;stroke-opacity:1"
+ inkscape:connector-curvature="0" />
+ <path
+ id="path4917"
+ d="M -1.2866832,4.1775838 3.0617053,0.21974226"
+ style="fill:#528ac6;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001;stroke-linecap:round;stroke-opacity:1"
+ inkscape:connector-curvature="0" />
+ <path
+ id="path4919"
+ d="M 1.3053582,4.1775838 5.6537466,0.21974226"
+ style="fill:#528ac6;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.80000001;stroke-linecap:round;stroke-opacity:1"
+ inkscape:connector-curvature="0" />
+ </g>
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path4904"
+ style="fill:#528ac6;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path4886"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#528ac6;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path4874"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker5734-4"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path5736-7"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker5734-4-3"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path5736-7-7"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker5734-4-4"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ id="path5736-7-1"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker5369-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5371-1"
+ style="fill:#d4ce00;fill-opacity:1;fill-rule:evenodd;stroke:#d4ce00;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker5369-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5371-9"
+ style="fill:#38782d;fill-opacity:1;fill-rule:evenodd;stroke:#38782d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker5369-3-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5371-1-0"
+ style="fill:#38782d;fill-opacity:1;fill-rule:evenodd;stroke:#38782d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-0.3,0,0,-0.3,0.69,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker18451-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path18453-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart-7"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path4871-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker7350-9"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path7352-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="2.8"
+ inkscape:cx="258.01346"
+ inkscape:cy="122.99559"
+ inkscape:document-units="cm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1920"
+ inkscape:window-height="1017"
+ inkscape:window-x="1592"
+ inkscape:window-y="-8"
+ inkscape:window-maximized="1"
+ fit-margin-top="3"
+ fit-margin-right="1"
+ fit-margin-bottom="1"
+ fit-margin-left="1" />
+ <metadata
+ id="metadata7">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(194.35761,4.2637303)">
+ <rect
+ style="opacity:1;fill:#528ac6;fill-opacity:1;stroke:none;stroke-opacity:1"
+ id="rect4140-5"
+ width="131.15564"
+ height="102.30846"
+ x="-179.57692"
+ y="140.22072"
+ ry="7.155364" />
+ <rect
+ style="opacity:1;fill:#528ac6;fill-opacity:1;stroke:none;stroke-opacity:1"
+ id="rect4140-5-1"
+ width="131.15564"
+ height="102.30846"
+ x="213.55591"
+ y="140.22072"
+ ry="6.5590839" />
+ <rect
+ style="opacity:1;fill:#528ac6;fill-opacity:1;stroke:none;stroke-opacity:1"
+ id="rect4140-5-2"
+ width="131.15564"
+ height="46.258114"
+ x="-179.57692"
+ y="6.3661909"
+ ry="5.3665228" />
+ <rect
+ style="opacity:1;fill:#528ac6;fill-opacity:1;stroke:none;stroke-opacity:1"
+ id="rect4140-5-2-7"
+ width="131.15564"
+ height="46.258114"
+ x="213.55591"
+ y="6.3661909"
+ ry="6.5590839" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.07670021px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-182.36929"
+ y="188.59424"
+ id="text4210"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan4212"
+ x="-182.36929"
+ y="188.59424">ep0</tspan><tspan
+ sodipodi:role="line"
+ x="-182.36929"
+ y="208.69011"
+ id="tspan4214" /><tspan
+ sodipodi:role="line"
+ x="-182.36929"
+ y="228.786"
+ id="tspan4216" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.07670021px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="324.621"
+ y="188.49612"
+ id="text4218"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan4220"
+ x="324.621"
+ y="188.49612">ep1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.07670021px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-159.97678"
+ y="32.860863"
+ id="text4222"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan4224"
+ x="-159.97678"
+ y="32.860863">traffic gen</tspan><tspan
+ sodipodi:role="line"
+ x="-159.97678"
+ y="52.956738"
+ id="tspan4226" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.07670021px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="249.77068"
+ y="32.860863"
+ id="text4222-1"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan4224-1"
+ x="249.77068"
+ y="32.860863">traffic gen</tspan><tspan
+ sodipodi:role="line"
+ x="249.77068"
+ y="52.956738"
+ id="tspan4226-6" /></text>
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.60767007px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)"
+ d="m -48.997848,184.52811 c 260.957198,-0.5963 260.957198,-0.5963 260.957198,-0.5963"
+ id="path5305"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.60767007px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker10270)"
+ d="M -47.641399,212.81116 C 213.3158,212.21487 213.3158,212.21487 213.3158,212.21487"
+ id="path5305-2"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.60767007px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker7350)"
+ d="m 262.8239,53.615939 c 0,85.864371 0,85.864371 0,85.864371"
+ id="path5922-0-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.60767007px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart)"
+ d="m -146.8568,54.80849 c 0,85.86437 0,85.86437 0,85.86437"
+ id="path5922-0-1"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.5px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="246.78571"
+ y="500.57648"
+ id="text7750"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan7752"
+ x="246.78571"
+ y="500.57648" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:18.08628845px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-158.9595"
+ y="151.84753"
+ id="text7754"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7756"
+ x="-158.9595"
+ y="151.84753">2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:18.08628845px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-110.66317"
+ y="151.84753"
+ id="text7758"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7760"
+ x="-110.66317"
+ y="151.84753">3</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:18.08628845px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="267.07309"
+ y="151.84753"
+ id="text7762"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7764"
+ x="267.07309"
+ y="151.84753">2</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:18.08628845px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="318.81448"
+ y="151.84753"
+ id="text7766"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7768"
+ x="318.81448"
+ y="151.84753">3</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:18.08628845px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="225.86406"
+ y="183.73483"
+ id="text7770"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7772"
+ x="225.86406"
+ y="183.73483">0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:18.08628845px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="226.18214"
+ y="211.56969"
+ id="text7774"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7776"
+ x="226.18214"
+ y="211.56969">1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:11.25px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="311.78571"
+ y="505.93362"
+ id="text7778"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan7780"
+ x="311.78571"
+ y="505.93362" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:18.08628845px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-65.239075"
+ y="183.73483"
+ id="text7782"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7784"
+ x="-65.239075"
+ y="183.73483">0</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:18.08628845px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="-64.921005"
+ y="211.56969"
+ id="text7786"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7788"
+ x="-64.921005"
+ y="211.56969">1</tspan></text>
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#dc181a;stroke-width:1.60606241;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:3.21212486, 9.63637456;stroke-dashoffset:0;stroke-opacity:1"
+ d="M -190.00162,248.33118 C -74.638406,174.62535 10.240093,56.871479 81.499676,57.515772 176.72224,59.019701 245.16177,197.27679 352.92565,246.54234"
+ id="path7850"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.07670021px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#dc181a;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="84.985992"
+ y="98.449951"
+ id="text7876"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7878"
+ x="84.985992"
+ y="98.449951">UNPROTECTED</tspan><tspan
+ sodipodi:role="line"
+ x="84.985992"
+ y="118.54583"
+ id="tspan7886">cipher-text</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.07670021px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:center;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:middle;fill:#dc181a;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="84.899643"
+ y="20.937273"
+ id="text7880"
+ sodipodi:linespacing="125%"
+ inkscape:transform-center-x="48.100163"
+ inkscape:transform-center-y="3.577681"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan7882"
+ x="84.899643"
+ y="20.937273">PROTECTED</tspan><tspan
+ sodipodi:role="line"
+ x="84.899643"
+ y="41.03315"
+ id="tspan7884">clear-text</tspan></text>
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#035002;stroke-width:3.42781782;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker5369)"
+ d="m -101.57521,164.71986 c -0.0232,13.22578 17.528492,19.5572 32.486212,20.12768"
+ id="path9370"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#d4ce00;stroke-width:3.08351111;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker5369-3)"
+ d="m 231.46374,184.56991 c 19.27508,-1.72507 24.47287,-7.77038 31.06883,-19.63365"
+ id="path9370-8"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.8206749px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="60.45417"
+ y="251.78688"
+ id="text14635"
+ sodipodi:linespacing="125%"
+ transform="scale(0.9630889,1.0383257)"><tspan
+ sodipodi:role="line"
+ id="tspan14637"
+ x="60.45417"
+ y="251.78688">outbound</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:16.07670021px;line-height:125%;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="62.073208"
+ y="232.23116"
+ id="text14635-4"
+ sodipodi:linespacing="125%"
+ transform="scale(0.96291595,1.0385122)"><tspan
+ sodipodi:role="line"
+ id="tspan14637-2"
+ x="62.073208"
+ y="232.23116">inbound</tspan></text>
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#d4ce00;stroke-width:3.8278625;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow1Sstart)"
+ d="m -144.07995,171.20523 c 15.90052,30.60035 40.80354,39.04445 75.670299,43.29761"
+ id="path9370-1"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:#035002;stroke-width:3.73783302;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#marker9102)"
+ d="m 236.99654,212.30897 c 40.08586,-4.29157 62.39356,-19.33069 76.11102,-48.84339"
+ id="path9370-8-8"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#d4ce00;stroke-width:4.01917505;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 32.274837,235.80819 c 18.037567,-0.2236 20.456397,-0.29813 20.456397,-0.29813"
+ id="path10604"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#035002;stroke-width:4.01917505;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 32.274837,256.82674 c 18.037567,-0.22362 20.456397,-0.29814 20.456397,-0.29814"
+ id="path10604-4"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.60767007px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-7)"
+ d="m -100.78618,138.21108 c 0,-85.864363 0,-85.864363 0,-85.864363"
+ id="path5922-0-1-5"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.60767007px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#marker7350-9)"
+ d="m 312.42811,137.49681 c 0,-85.864382 0,-85.864382 0,-85.864382"
+ id="path5922-0-6-9"
+ inkscape:connector-curvature="0" />
+ </g>
+</svg>
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index 930f68c0..96bb3179 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -76,6 +76,7 @@ Sample Applications User Guide
ptpclient
performance_thread
ipsec_secgw
+ pdump
**Figures**
diff --git a/doc/guides/sample_app_ug/ip_pipeline.rst b/doc/guides/sample_app_ug/ip_pipeline.rst
index 899fd4a0..09cbc174 100644
--- a/doc/guides/sample_app_ug/ip_pipeline.rst
+++ b/doc/guides/sample_app_ug/ip_pipeline.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -351,33 +351,35 @@ Application resources present in the configuration file
.. table:: Application resource names in the configuration file
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Resource type | Format | Examples |
- +==========================+=============================+=================================================+
- | Pipeline | ``PIPELINE<ID>`` | ``PIPELINE0``, ``PIPELINE1`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Mempool | ``MEMPOOL<ID>`` | ``MEMPOOL0``, ``MEMPOOL1`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Link (network interface) | ``LINK<ID>`` | ``LINK0``, ``LINK1`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Link RX queue | ``RXQ<LINK_ID>.<QUEUE_ID>`` | ``RXQ0.0``, ``RXQ1.5`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Link TX queue | ``TXQ<LINK_ID>.<QUEUE_ID>`` | ``TXQ0.0``, ``TXQ1.5`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Software queue | ``SWQ<ID>`` | ``SWQ0``, ``SWQ1`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Traffic Manager | ``TM<LINK_ID>`` | ``TM0``, ``TM1`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Source | ``SOURCE<ID>`` | ``SOURCE0``, ``SOURCE1`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Sink | ``SINK<ID>`` | ``SINK0``, ``SINK1`` |
- +--------------------------+-----------------------------+-------------------------------------------------+
- | Message queue | ``MSGQ<ID>`` | ``MSGQ0``, ``MSGQ1``, |
- | | ``MSGQ-REQ-PIPELINE<ID>`` | ``MSGQ-REQ-PIPELINE2``, ``MSGQ-RSP-PIPELINE2,`` |
- | | ``MSGQ-RSP-PIPELINE<ID>`` | ``MSGQ-REQ-CORE-s0c1``, ``MSGQ-RSP-CORE-s0c1`` |
- | | ``MSGQ-REQ-CORE-<CORE_ID>`` | |
- | | ``MSGQ-RSP-CORE-<CORE_ID>`` | |
- +--------------------------+-----------------------------+-------------------------------------------------+
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Resource type | Format | Examples |
+ +============================+=============================+=================================================+
+ | Pipeline | ``PIPELINE<ID>`` | ``PIPELINE0``, ``PIPELINE1`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Mempool | ``MEMPOOL<ID>`` | ``MEMPOOL0``, ``MEMPOOL1`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Link (network interface) | ``LINK<ID>`` | ``LINK0``, ``LINK1`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Link RX queue | ``RXQ<LINK_ID>.<QUEUE_ID>`` | ``RXQ0.0``, ``RXQ1.5`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Link TX queue | ``TXQ<LINK_ID>.<QUEUE_ID>`` | ``TXQ0.0``, ``TXQ1.5`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Software queue | ``SWQ<ID>`` | ``SWQ0``, ``SWQ1`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Traffic Manager | ``TM<LINK_ID>`` | ``TM0``, ``TM1`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | KNI (kernel NIC interface) | ``KNI<LINK_ID>`` | ``KNI0``, ``KNI1`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Source | ``SOURCE<ID>`` | ``SOURCE0``, ``SOURCE1`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Sink | ``SINK<ID>`` | ``SINK0``, ``SINK1`` |
+ +----------------------------+-----------------------------+-------------------------------------------------+
+ | Message queue | ``MSGQ<ID>`` | ``MSGQ0``, ``MSGQ1``, |
+ | | ``MSGQ-REQ-PIPELINE<ID>`` | ``MSGQ-REQ-PIPELINE2``, ``MSGQ-RSP-PIPELINE2,`` |
+ | | ``MSGQ-RSP-PIPELINE<ID>`` | ``MSGQ-REQ-CORE-s0c1``, ``MSGQ-RSP-CORE-s0c1`` |
+ | | ``MSGQ-REQ-CORE-<CORE_ID>`` | |
+ | | ``MSGQ-RSP-CORE-<CORE_ID>`` | |
+ +----------------------------+-----------------------------+-------------------------------------------------+
``LINK`` instances are created implicitly based on the ``PORT_MASK`` application startup argument.
``LINK0`` is the first port enabled in the ``PORT_MASK``, port 1 is the next one, etc.
@@ -386,7 +388,7 @@ For example, if bit 5 is the first bit set in the bitmask, then ``LINK0`` is hav
This mechanism creates a contiguous LINK ID space and isolates the configuration file against changes in the board
PCIe slots where NICs are plugged in.
-``RXQ``, ``TXQ`` and ``TM`` instances have the LINK ID as part of their name.
+``RXQ``, ``TXQ``, ``TM`` and ``KNI`` instances have the LINK ID as part of their name.
For example, ``RXQ2.1``, ``TXQ2.1`` and ``TM2`` are all associated with ``LINK2``.
@@ -707,6 +709,58 @@ TM section
+---------------+---------------------------------------------+----------+----------+---------------+
+KNI section
+~~~~~~~~~~~
+
+.. _table_ip_pipelines_kni_section:
+
+.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|
+
+.. table:: Configuration file KNI section
+
+ +---------------+----------------------------------------------+----------+------------------+---------------+
+ | Section | Description | Optional | Type | Default value |
+ +===============+==============================================+==========+==================+===============+
+ | core | CPU core to run the KNI kernel thread. | YES | See "CPU Core | Not set |
+ | | When core config is set, the KNI kernel | | notation" | |
+ | | thread will be bound to the particular core. | | | |
+ | | When core config is not set, the KNI kernel | | | |
+ | | thread will be scheduled by the OS. | | | |
+ +---------------+----------------------------------------------+----------+------------------+---------------+
+ | mempool | Mempool to use for buffer allocation for | YES | uint32_t | MEMPOOL0 |
+ | | current KNI port. The mempool ID has | | | |
+ | | to be associated with a valid instance | | | |
+ | | defined in the mempool entry of the global | | | |
+ | | section. | | | |
+ +---------------+----------------------------------------------+----------+------------------+---------------+
+ | burst_read | Read burst size (number of packets) | YES | uint32_t | 32 |
+ | | | | power of 2 | |
+ | | | | 0 < burst < size | |
+ +---------------+----------------------------------------------+----------+------------------+---------------+
+ | burst_write | Write burst size (number of packets) | YES | uint32_t | 32 |
+ | | | | power of 2 | |
+ | | | | 0 < burst < size | |
+ +---------------+----------------------------------------------+----------+------------------+---------------+
+ | dropless | When dropless is set to NO, packets can be | YES | YES/NO | NO |
+ | | dropped if not enough free slots are | | | |
+ | | currently available in the queue, so the | | | |
+ | | write operation to the queue is non- | | | |
+ | | blocking. | | | |
+ | | When dropless is set to YES, packets cannot | | | |
+ | | be dropped if not enough free slots are | | | |
+ | | currently available in the queue, so the | | | |
+ | | write operation to the queue is blocking, as | | | |
+ | | the write operation is retried until enough | | | |
+ | | free slots become available and all the | | | |
+ | | packets are successfully written to the | | | |
+ | | queue. | | | |
+ +---------------+----------------------------------------------+----------+------------------+---------------+
+ | n_retries | Number of retries. Valid only when dropless | YES | uint64_t | 0 |
+ | | is set to YES. When set to 0, it indicates | | | |
+ | | unlimited number of retries. | | | |
+ +---------------+----------------------------------------------+----------+------------------+---------------+
+
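+A minimal sketch of how a KNI section might look in the configuration file is
+shown below. This example is illustrative only; the section name, the core
+notation and the mempool reference are assumptions based on the tables above:
+
+.. code-block:: ini
+
+    ; Hypothetical KNI section: bind the KNI0 kernel thread to socket 0,
+    ; core 4, and take buffers from MEMPOOL0 with the default burst sizes.
+    [KNI0]
+    core = s0c4
+    mempool = MEMPOOL0
+    burst_read = 32
+    burst_write = 32
+    dropless = NO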
+
SOURCE section
~~~~~~~~~~~~~~
diff --git a/doc/guides/sample_app_ug/ipsec_secgw.rst b/doc/guides/sample_app_ug/ipsec_secgw.rst
index c11c7e7e..fcb33c26 100644
--- a/doc/guides/sample_app_ug/ipsec_secgw.rst
+++ b/doc/guides/sample_app_ug/ipsec_secgw.rst
@@ -38,165 +38,171 @@ Overview
--------
The application demonstrates the implementation of a Security Gateway
-(not IPsec compliant, see Constraints bellow) using DPDK based on RFC4301,
+(not IPsec compliant, see the Constraints section below) using DPDK based on RFC4301,
RFC4303, RFC3602 and RFC2404.
Internet Key Exchange (IKE) is not implemented, so only manual setting of
Security Policies and Security Associations is supported.
The Security Policies (SP) are implemented as ACL rules, the Security
-Associations (SA) are stored in a table and the Routing is implemented
+Associations (SA) are stored in a table and the routing is implemented
using LPM.
-The application classify the ports between Protected and Unprotected.
-Thus, traffic received in an Unprotected or Protected port is consider
+The application classifies the ports as *Protected* and *Unprotected*.
+Thus, traffic received on an Unprotected or Protected port is considered
Inbound or Outbound respectively.
-Path for IPsec Inbound traffic:
+The path for IPsec Inbound traffic is:
-* Read packets from the port
+* Read packets from the port.
* Classify packets between IPv4 and ESP.
-* Inbound SA lookup for ESP packets based on their SPI
-* Verification/Decryption
-* Removal of ESP and outer IP header
-* Inbound SP check using ACL of decrypted packets and any other IPv4 packet
- we read.
-* Routing
-* Write packet to port
-
-Path for IPsec Outbound traffic:
-
-* Read packets from the port
-* Outbound SP check using ACL of all IPv4 traffic
-* Outbound SA lookup for packets that need IPsec protection
-* Add ESP and outer IP header
-* Encryption/Digest
-* Routing
-* Write packet to port
+* Perform Inbound SA lookup for ESP packets based on their SPI.
+* Perform Verification/Decryption.
+* Remove ESP and outer IP header.
+* Perform Inbound SP check using ACL on decrypted packets and any other IPv4 packets.
+* Routing.
+* Write packet to port.
+
+The path for IPsec Outbound traffic is:
+
+* Read packets from the port.
+* Perform Outbound SP check using ACL on all IPv4 traffic.
+* Perform Outbound SA lookup for packets that need IPsec protection.
+* Add ESP and outer IP header.
+* Perform Encryption/Digest.
+* Routing.
+* Write packet to port.
+
Constraints
-----------
-* IPv4 traffic
-* ESP tunnel mode
-* EAS-CBC, HMAC-SHA1 and NULL
-* Each SA must be handle by a unique lcore (1 RX queue per port)
-* No chained mbufs
+
+* No IPv6 options headers.
+* No AH mode.
+* Currently only AES-CBC, HMAC-SHA1 and NULL.
+* Each SA must be handled by a unique lcore (*1 RX queue per port*).
+* No chained mbufs.
+
Compiling the Application
-------------------------
To compile the application:
-#. Go to the sample application directory:
-
- .. code-block:: console
+#. Go to the sample application directory::
export RTE_SDK=/path/to/rte_sdk
cd ${RTE_SDK}/examples/ipsec-secgw
-#. Set the target (a default target is used if not specified). For example:
+#. Set the target (a default target is used if not specified). For example::
- .. code-block:: console
export RTE_TARGET=x86_64-native-linuxapp-gcc
See the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-#. Build the application:
-
- .. code-block:: console
+#. Build the application::
make
+#. [Optional] Build the application for debugging:
+ This option adds some extra flags, disables compiler optimizations and
+ is verbose::
+
+ make DEBUG=1
+
+
Running the Application
-----------------------
-The application has a number of command line options:
+The application has a number of command line options::
-.. code-block:: console
- ./build/ipsec-secgw [EAL options] -- -p PORTMASK -P -u PORTMASK --config
- (port,queue,lcore)[,(port,queue,lcore] --single-sa SAIDX --ep0|--ep1
+ ./build/ipsec-secgw [EAL options] --
+ -p PORTMASK -P -u PORTMASK
+ --config (port,queue,lcore)[,(port,queue,lcore)]
+ --single-sa SAIDX
+ --ep0|--ep1
-where,
+Where:
-* -p PORTMASK: Hexadecimal bitmask of ports to configure
+* ``-p PORTMASK``: Hexadecimal bitmask of ports to configure.
-* -P: optional, sets all ports to promiscuous mode so that packets are
+* ``-P``: *optional*. Sets all ports to promiscuous mode so that packets are
accepted regardless of the packet's Ethernet MAC destination address.
Without this option, only packets with the Ethernet MAC destination address
set to the Ethernet address of the port are accepted (default is enabled).
-* -u PORTMASK: hexadecimal bitmask of unprotected ports
+* ``-u PORTMASK``: Hexadecimal bitmask of unprotected ports.
-* --config (port,queue,lcore)[,(port,queue,lcore)]: determines which queues
- from which ports are mapped to which cores
+* ``--config (port,queue,lcore)[,(port,queue,lcore)]``: determines which queues
+ from which ports are mapped to which cores.
-* --single-sa SAIDX: use a single SA for outbound traffic, bypassing the SP
+* ``--single-sa SAIDX``: use a single SA for outbound traffic, bypassing the SP
on both Inbound and Outbound. This option is meant for debugging/performance
purposes.
-* --ep0: configure the app as Endpoint 0.
+* ``--ep0``: configure the app as Endpoint 0.
-* --ep1: configure the app as Endpoint 1.
+* ``--ep1``: configure the app as Endpoint 1.
-Either one of --ep0 or --ep1 *must* be specified.
-The main purpose of these options is two easily configure two systems
-back-to-back that would forward traffic through an IPsec tunnel.
+Either one of ``--ep0`` or ``--ep1`` **must** be specified.
+The main purpose of these options is to easily configure two systems
+back-to-back that would forward traffic through an IPsec tunnel (see
+:ref:`figure_ipsec_endpoints`).
The mapping of lcores to port/queues is similar to other l3fwd applications.
-For example, given the following command line:
+For example, given the following command line::
-.. code-block:: console
-
- ./build/ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048
- --vdev "cryptodev_null_pmd" -- -p 0xf -P -u 0x3
- --config="(0,0,20),(1,0,20),(2,0,21),(3,0,21)" --ep0
+ ./build/ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048 \
+ --vdev "cryptodev_null_pmd" -- -p 0xf -P -u 0x3 \
+ --config="(0,0,20),(1,0,20),(2,0,21),(3,0,21)" --ep0
 where each option means:
-* The -l option enables cores 20 and 21
+* The ``-l`` option enables cores 20 and 21.
-* The -n option sets memory 4 channels
+* The ``-n`` option sets the number of memory channels to 4.
-* The --socket-mem to use 2GB on socket 1
+* The ``--socket-mem`` option allocates 2GB of memory on socket 1.
-* The --vdev "cryptodev_null_pmd" option creates virtual NULL cryptodev PMD
+* The ``--vdev "cryptodev_null_pmd"`` option creates virtual NULL cryptodev PMD.
-* The -p option enables ports (detected) 0, 1, 2 and 3
+* The ``-p`` option enables ports (detected) 0, 1, 2 and 3.
-* The -P option enables promiscuous mode
+* The ``-P`` option enables promiscuous mode.
-* The -u option sets ports 1 and 2 as unprotected, leaving 2 and 3 as protected
+* The ``-u`` option sets ports 0 and 1 as unprotected, leaving ports 2 and 3 as protected.
-* The --config option enables one queue per port with the following mapping:
+* The ``--config`` option enables one queue per port with the following mapping:
-+----------+-----------+-----------+---------------------------------------+
-| **Port** | **Queue** | **lcore** | **Description** |
-| | | | |
-+----------+-----------+-----------+---------------------------------------+
-| 0 | 0 | 20 | Map queue 0 from port 0 to lcore 20. |
-| | | | |
-+----------+-----------+-----------+---------------------------------------+
-| 1 | 0 | 20 | Map queue 0 from port 1 to lcore 20. |
-| | | | |
-+----------+-----------+-----------+---------------------------------------+
-| 2 | 0 | 21 | Map queue 0 from port 2 to lcore 21. |
-| | | | |
-+----------+-----------+-----------+---------------------------------------+
-| 3 | 0 | 21 | Map queue 0 from port 3 to lcore 21. |
-| | | | |
-+----------+-----------+-----------+---------------------------------------+
+ +----------+-----------+-----------+---------------------------------------+
+ | **Port** | **Queue** | **lcore** | **Description** |
+ | | | | |
+ +----------+-----------+-----------+---------------------------------------+
+ | 0 | 0 | 20 | Map queue 0 from port 0 to lcore 20. |
+ | | | | |
+ +----------+-----------+-----------+---------------------------------------+
+ | 1 | 0 | 20 | Map queue 0 from port 1 to lcore 20. |
+ | | | | |
+ +----------+-----------+-----------+---------------------------------------+
+ | 2 | 0 | 21 | Map queue 0 from port 2 to lcore 21. |
+ | | | | |
+ +----------+-----------+-----------+---------------------------------------+
+ | 3 | 0 | 21 | Map queue 0 from port 3 to lcore 21. |
+ | | | | |
+ +----------+-----------+-----------+---------------------------------------+
-* The --ep0 options configures the app with a given set of SP, SA and Routing
+* The ``--ep0`` option configures the app with a given set of SP, SA and Routing
entries as explained below in more detail.
Refer to the *DPDK Getting Started Guide* for general information on running
applications and the Environment Abstraction Layer (EAL) options.
The application would do a best effort to "map" crypto devices to cores, with
-hardware devices having priority.
+hardware devices having priority. Basically, if hardware devices are present
+they will be assigned to a core before software ones.
This means that if the application is using a single core and both hardware
and software crypto devices are detected, hardware devices will be used.
@@ -208,18 +214,35 @@ For example, something like the following command line:
.. code-block:: console
- ./build/ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048
- -w 81:00.0 -w 81:00.1 -w 81:00.2 -w 81:00.3
- --vdev "cryptodev_aesni_mb_pmd" --vdev "cryptodev_null_pmd" --
- -p 0xf -P -u 0x3 --config="(0,0,20),(1,0,20),(2,0,21),(3,0,21)"
+ ./build/ipsec-secgw -l 20,21 -n 4 --socket-mem 0,2048 \
+ -w 81:00.0 -w 81:00.1 -w 81:00.2 -w 81:00.3 \
+ --vdev "cryptodev_aesni_mb_pmd" --vdev "cryptodev_null_pmd" \
+ -- \
+ -p 0xf -P -u 0x3 --config="(0,0,20),(1,0,20),(2,0,21),(3,0,21)" \
--ep0
+
Configurations
--------------
The following sections provide some details on the default values used to
initialize the SP, SA and Routing tables.
-Currently all the configuration is hard coded into the application.
+Currently all configuration information is hard-coded into the application.
+
+The following image illustrates a few of the concepts regarding IPsec, such
+as protected/unprotected and inbound/outbound traffic, from the point of
+view of two back-to-back endpoints:
+
+.. _figure_ipsec_endpoints:
+
+.. figure:: img/ipsec_endpoints.*
+
+ IPsec Inbound/Outbound traffic
+
+Note that the above image only displays unidirectional traffic per port
+for illustration purposes.
+The application supports bidirectional traffic on all ports.
+
Security Policy Initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -228,107 +251,126 @@ As mention in the overview, the Security Policies are ACL rules.
The application defines two ACLs, one each of Inbound and Outbound, and
it replicates them per socket in use.
-Following are the default rules:
-
-Endpoint 0 Outbound Security Policies:
-
-+---------+------------------+-----------+------------+
-| **Src** | **Dst** | **proto** | **SA idx** |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.105.0/24 | Any | 5 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.106.0/24 | Any | 6 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.107.0/24 | Any | 7 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.108.0/24 | Any | 8 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.200.0/24 | Any | 9 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.250.0/24 | Any | BYPASS |
-| | | | |
-+---------+------------------+-----------+------------+
-
-Endpoint 0 Inbound Security Policies:
-
-+---------+------------------+-----------+------------+
-| **Src** | **Dst** | **proto** | **SA idx** |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.115.0/24 | Any | 5 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.116.0/24 | Any | 6 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.117.0/24 | Any | 7 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.118.0/24 | Any | 8 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.210.0/24 | Any | 9 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.240.0/24 | Any | BYPASS |
-| | | | |
-+---------+------------------+-----------+------------+
-
-Endpoint 1 Outbound Security Policies:
-
-+---------+------------------+-----------+------------+
-| **Src** | **Dst** | **proto** | **SA idx** |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.115.0/24 | Any | 5 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.116.0/24 | Any | 6 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.117.0/24 | Any | 7 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.118.0/24 | Any | 8 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.210.0/24 | Any | 9 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.240.0/24 | Any | BYPASS |
-| | | | |
-+---------+------------------+-----------+------------+
-
-Endpoint 1 Inbound Security Policies:
-
-+---------+------------------+-----------+------------+
-| **Src** | **Dst** | **proto** | **SA idx** |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.105.0/24 | Any | 5 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.106.0/24 | Any | 6 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.107.0/24 | Any | 7 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.108.0/24 | Any | 8 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.200.0/24 | Any | 9 |
-| | | | |
-+---------+------------------+-----------+------------+
-| Any | 192.168.250.0/24 | Any | BYPASS |
-| | | | |
-+---------+------------------+-----------+------------+
+The following are the default rules, showing only the relevant information
+and assuming any value is valid for the fields not mentioned (src IP, proto,
+src/dst ports).
+
+.. _table_ipsec_endpoint_outbound_sp:
+
+.. table:: Endpoint 0 Outbound Security Policies
+
+ +-----------------------------------+------------+
+ | **Dst** | **SA idx** |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.105.0/24 | 5 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.106.0/24 | 6 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.175.0/24 | 10 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.176.0/24 | 11 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.200.0/24 | 15 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.201.0/24 | 16 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.55.0/24 | 25 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.56.0/24 | 26 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.240.0/24 | BYPASS |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.241.0/24 | BYPASS |
+ | | |
+ +-----------------------------------+------------+
+ | 0:0:0:0:5555:5555:0:0/96 | 5 |
+ | | |
+ +-----------------------------------+------------+
+ | 0:0:0:0:6666:6666:0:0/96 | 6 |
+ | | |
+ +-----------------------------------+------------+
+ | 0:0:1111:1111:0:0:0:0/96 | 10 |
+ | | |
+ +-----------------------------------+------------+
+ | 0:0:1111:1111:1111:1111:0:0/96 | 11 |
+ | | |
+ +-----------------------------------+------------+
+ | 0:0:0:0:aaaa:aaaa:0:0/96 | 25 |
+ | | |
+ +-----------------------------------+------------+
+ | 0:0:0:0:bbbb:bbbb:0:0/96 | 26 |
+ | | |
+ +-----------------------------------+------------+
+
+.. _table_ipsec_endpoint_inbound_sp:
+
+.. table:: Endpoint 0 Inbound Security Policies
+
+ +-----------------------------------+------------+
+ | **Dst** | **SA idx** |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.115.0/24 | 105 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.116.0/24 | 106 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.185.0/24 | 110 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.186.0/24 | 111 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.210.0/24 | 115 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.211.0/24 | 116 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.65.0/24 | 125 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.66.0/24 | 126 |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.245.0/24 | BYPASS |
+ | | |
+ +-----------------------------------+------------+
+ | 192.168.246.0/24 | BYPASS |
+ | | |
+ +-----------------------------------+------------+
+ | ffff:0:0:0:5555:5555:0:0/96 | 105 |
+ | | |
+ +-----------------------------------+------------+
+ | ffff:0:0:0:6666:6666:0:0/96 | 106 |
+ | | |
+ +-----------------------------------+------------+
+ | ffff:0:1111:1111:0:0:0:0/96 | 110 |
+ | | |
+ +-----------------------------------+------------+
+ | ffff:0:1111:1111:1111:1111:0:0/96 | 111 |
+ | | |
+ +-----------------------------------+------------+
+ | ffff:0:0:0:aaaa:aaaa:0:0/96 | 125 |
+ | | |
+ +-----------------------------------+------------+
+ | ffff:0:0:0:bbbb:bbbb:0:0/96 | 126 |
+ | | |
+ +-----------------------------------+------------+
+
+For Endpoint 1, we use the same policies in reverse, meaning the Inbound SP
+entries are set as Outbound and vice versa.
Security Association Initialization
@@ -336,7 +378,7 @@ Security Association Initialization
 The SAs are kept in an array table.
-For Inbound, the SPI is used as index module the table size.
+For Inbound, the SPI is used as index modulo the table size.
This means that on a table for 100 SA, SPI 5 and 105 would use the same index
and that is not currently supported.
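+
+The following is a minimal sketch of the indexing scheme described above; the
+names are illustrative, not the application's actual code:
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    #define SA_TABLE_SZ 100   /* hypothetical table size */
+
+    static inline uint32_t
+    sa_index(uint32_t spi)
+    {
+        /* SPI 5 and SPI 105 both map to index 5, hence the collision. */
+        return spi % SA_TABLE_SZ;
+    }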
@@ -346,179 +388,327 @@ not the SPI in the Security Policy.
All SAs configured with AES-CBC and HMAC-SHA1 share the same values for cipher
block size and key, and authentication digest size and key.
-Following are the default values:
-
-Endpoint 0 Outbound Security Associations:
-
-+---------+------------+-----------+----------------+------------------+
-| **SPI** | **Cipher** | **Auth** | **Tunnel src** | **Tunnel dst** |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 5 | AES-CBC | HMAC-SHA1 | 172.16.1.5 | 172.16.2.5 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 6 | AES-CBC | HMAC-SHA1 | 172.16.1.6 | 172.16.2.6 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 7 | AES-CBC | HMAC-SHA1 | 172.16.1.7 | 172.16.2.7 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 8 | AES-CBC | HMAC-SHA1 | 172.16.1.8 | 172.16.2.8 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 9 | NULL | NULL | 172.16.1.5 | 172.16.2.5 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-
-Endpoint 0 Inbound Security Associations:
-
-+---------+------------+-----------+----------------+------------------+
-| **SPI** | **Cipher** | **Auth** | **Tunnel src** | **Tunnel dst** |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 5 | AES-CBC | HMAC-SHA1 | 172.16.2.5 | 172.16.1.5 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 6 | AES-CBC | HMAC-SHA1 | 172.16.2.6 | 172.16.1.6 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 7 | AES-CBC | HMAC-SHA1 | 172.16.2.7 | 172.16.1.7 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 8 | AES-CBC | HMAC-SHA1 | 172.16.2.8 | 172.16.1.8 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 9 | NULL | NULL | 172.16.2.5 | 172.16.1.5 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-
-Endpoint 1 Outbound Security Associations:
-
-+---------+------------+-----------+----------------+------------------+
-| **SPI** | **Cipher** | **Auth** | **Tunnel src** | **Tunnel dst** |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 5 | AES-CBC | HMAC-SHA1 | 172.16.2.5 | 172.16.1.5 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 6 | AES-CBC | HMAC-SHA1 | 172.16.2.6 | 172.16.1.6 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 7 | AES-CBC | HMAC-SHA1 | 172.16.2.7 | 172.16.1.7 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 8 | AES-CBC | HMAC-SHA1 | 172.16.2.8 | 172.16.1.8 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 9 | NULL | NULL | 172.16.2.5 | 172.16.1.5 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-
-Endpoint 1 Inbound Security Associations:
-
-+---------+------------+-----------+----------------+------------------+
-| **SPI** | **Cipher** | **Auth** | **Tunnel src** | **Tunnel dst** |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 5 | AES-CBC | HMAC-SHA1 | 172.16.1.5 | 172.16.2.5 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 6 | AES-CBC | HMAC-SHA1 | 172.16.1.6 | 172.16.2.6 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 7 | AES-CBC | HMAC-SHA1 | 172.16.1.7 | 172.16.2.7 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 8 | AES-CBC | HMAC-SHA1 | 172.16.1.8 | 172.16.2.8 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
-| 9 | NULL | NULL | 172.16.1.5 | 172.16.2.5 |
-| | | | | |
-+---------+------------+-----------+----------------+------------------+
+The following are the default values:
+
+.. _table_ipsec_endpoint_outbound_sa:
+
+.. table:: Endpoint 0 Outbound Security Associations
+
+ +---------+----------+------------+-----------+----------------+----------------+
+ | **SPI** | **Mode** | **Cipher** | **Auth** | **Tunnel src** | **Tunnel dst** |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 5 | Tunnel | AES-CBC | HMAC-SHA1 | 172.16.1.5 | 172.16.2.5 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 6 | Tunnel | AES-CBC | HMAC-SHA1 | 172.16.1.6 | 172.16.2.6 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 10 | Trans | AES-CBC | HMAC-SHA1 | N/A | N/A |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 11 | Trans | AES-CBC | HMAC-SHA1 | N/A | N/A |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 15 | Tunnel | NULL | NULL | 172.16.1.5 | 172.16.2.5 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 16 | Tunnel | NULL | NULL | 172.16.1.6 | 172.16.2.6 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 25 | Tunnel | AES-CBC | HMAC-SHA1 | 1111:1111: | 2222:2222: |
+ | | | | | 1111:1111: | 2222:2222: |
+ | | | | | 1111:1111: | 2222:2222: |
+ | | | | | 1111:5555 | 2222:5555 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 26 | Tunnel | AES-CBC | HMAC-SHA1 | 1111:1111: | 2222:2222: |
+ | | | | | 1111:1111: | 2222:2222: |
+ | | | | | 1111:1111: | 2222:2222: |
+ | | | | | 1111:6666 | 2222:6666 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+
+.. _table_ipsec_endpoint_inbound_sa:
+
+.. table:: Endpoint 0 Inbound Security Associations
+
+ +---------+----------+------------+-----------+----------------+----------------+
+ | **SPI** | **Mode** | **Cipher** | **Auth** | **Tunnel src** | **Tunnel dst** |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 105 | Tunnel | AES-CBC | HMAC-SHA1 | 172.16.2.5 | 172.16.1.5 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 106 | Tunnel | AES-CBC | HMAC-SHA1 | 172.16.2.6 | 172.16.1.6 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 110 | Trans | AES-CBC | HMAC-SHA1 | N/A | N/A |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 111 | Trans | AES-CBC | HMAC-SHA1 | N/A | N/A |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 115 | Tunnel | NULL | NULL | 172.16.2.5 | 172.16.1.5 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 116 | Tunnel | NULL | NULL | 172.16.2.6 | 172.16.1.6 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 125 | Tunnel | AES-CBC | HMAC-SHA1 | 2222:2222: | 1111:1111: |
+ | | | | | 2222:2222: | 1111:1111: |
+ | | | | | 2222:2222: | 1111:1111: |
+ | | | | | 2222:5555 | 1111:5555 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+ | 126 | Tunnel | AES-CBC | HMAC-SHA1 | 2222:2222: | 1111:1111: |
+ | | | | | 2222:2222: | 1111:1111: |
+ | | | | | 2222:2222: | 1111:1111: |
+ | | | | | 2222:6666 | 1111:6666 |
+ | | | | | | |
+ +---------+----------+------------+-----------+----------------+----------------+
+
+For Endpoint 1, we use the same associations in reverse, meaning the Inbound SA
+entries are set as Outbound and vice versa.
+
Routing Initialization
~~~~~~~~~~~~~~~~~~~~~~
-The Routing is implemented using LPM table.
+The routing is implemented using an LPM table.
 The following are the default values:
-Endpoint 0 Routing Table:
-
-+------------------+----------+
-| **Dst addr** | **Port** |
-| | |
-+------------------+----------+
-| 172.16.2.5/32 | 0 |
-| | |
-+------------------+----------+
-| 172.16.2.6/32 | 0 |
-| | |
-+------------------+----------+
-| 172.16.2.7/32 | 1 |
-| | |
-+------------------+----------+
-| 172.16.2.8/32 | 1 |
-| | |
-+------------------+----------+
-| 192.168.115.0/24 | 2 |
-| | |
-+------------------+----------+
-| 192.168.116.0/24 | 2 |
-| | |
-+------------------+----------+
-| 192.168.117.0/24 | 3 |
-| | |
-+------------------+----------+
-| 192.168.118.0/24 | 3 |
-| | |
-+------------------+----------+
-| 192.168.210.0/24 | 2 |
-| | |
-+------------------+----------+
-| 192.168.240.0/24 | 2 |
-| | |
-+------------------+----------+
-| 192.168.250.0/24 | 0 |
-| | |
-+------------------+----------+
-
-Endpoint 1 Routing Table:
-
-+------------------+----------+
-| **Dst addr** | **Port** |
-| | |
-+------------------+----------+
-| 172.16.1.5/32 | 2 |
-| | |
-+------------------+----------+
-| 172.16.1.6/32 | 2 |
-| | |
-+------------------+----------+
-| 172.16.1.7/32 | 3 |
-| | |
-+------------------+----------+
-| 172.16.1.8/32 | 3 |
-| | |
-+------------------+----------+
-| 192.168.105.0/24 | 0 |
-| | |
-+------------------+----------+
-| 192.168.106.0/24 | 0 |
-| | |
-+------------------+----------+
-| 192.168.107.0/24 | 1 |
-| | |
-+------------------+----------+
-| 192.168.108.0/24 | 1 |
-| | |
-+------------------+----------+
-| 192.168.200.0/24 | 0 |
-| | |
-+------------------+----------+
-| 192.168.240.0/24 | 2 |
-| | |
-+------------------+----------+
-| 192.168.250.0/24 | 0 |
-| | |
-+------------------+----------+
+.. _table_ipsec_endpoint_outbound_routing:
+
+.. table:: Endpoint 0 Routing Table
+
+ +------------------+----------+
+ | **Dst addr** | **Port** |
+ | | |
+ +------------------+----------+
+ | 172.16.2.5/32 | 0 |
+ | | |
+ +------------------+----------+
+ | 172.16.2.6/32 | 1 |
+ | | |
+ +------------------+----------+
+ | 192.168.175.0/24 | 0 |
+ | | |
+ +------------------+----------+
+ | 192.168.176.0/24 | 1 |
+ | | |
+ +------------------+----------+
+ | 192.168.240.0/24 | 0 |
+ | | |
+ +------------------+----------+
+ | 192.168.241.0/24 | 1 |
+ | | |
+ +------------------+----------+
+ | 192.168.115.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.116.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 192.168.65.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.66.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 192.168.185.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.186.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 192.168.210.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.211.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 192.168.245.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.246.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 2222:2222: | 0 |
+ | 2222:2222: | |
+ | 2222:2222: | |
+ | 2222:5555/116 | |
+ | | |
+ +------------------+----------+
+ | 2222:2222: | 1 |
+ | 2222:2222: | |
+ | 2222:2222: | |
+ | 2222:6666/116 | |
+ | | |
+ +------------------+----------+
+ | 0000:0000: | 0 |
+ | 1111:1111: | |
+ | 0000:0000: | |
+ | 0000:0000/116 | |
+ | | |
+ +------------------+----------+
+ | 0000:0000: | 1 |
+ | 1111:1111: | |
+ | 1111:1111: | |
+ | 0000:0000/116 | |
+ | | |
+ +------------------+----------+
+ | ffff:0000: | 2 |
+ | 0000:0000: | |
+ | aaaa:aaaa: | |
+ | 0000:0/116 | |
+ | | |
+ +------------------+----------+
+ | ffff:0000: | 3 |
+ | 0000:0000: | |
+ | bbbb:bbbb: | |
+ | 0000:0/116 | |
+ | | |
+ +------------------+----------+
+ | ffff:0000: | 2 |
+ | 0000:0000: | |
+ | 5555:5555: | |
+ | 0000:0/116 | |
+ | | |
+ +------------------+----------+
+ | ffff:0000: | 3 |
+ | 0000:0000: | |
+ | 6666:6666: | |
+ | 0000:0/116 | |
+ | | |
+ +------------------+----------+
+ | ffff:0000: | 2 |
+ | 1111:1111: | |
+ | 0000:0000: | |
+ | 0000:0000/116 | |
+ | | |
+ +------------------+----------+
+ | ffff:0000: | 3 |
+ | 1111:1111: | |
+ | 1111:1111: | |
+ | 0000:0000/116 | |
+ | | |
+ +------------------+----------+
+
+.. _table_ipsec_endpoint_inbound_routing:
+
+.. table:: Endpoint 1 Routing Table
+
+ +------------------+----------+
+ | **Dst addr** | **Port** |
+ | | |
+ +------------------+----------+
+ | 172.16.1.5/32 | 0 |
+ | | |
+ +------------------+----------+
+ | 172.16.1.6/32 | 1 |
+ | | |
+ +------------------+----------+
+ | 192.168.185.0/24 | 0 |
+ | | |
+ +------------------+----------+
+ | 192.168.186.0/24 | 1 |
+ | | |
+ +------------------+----------+
+ | 192.168.245.0/24 | 0 |
+ | | |
+ +------------------+----------+
+ | 192.168.246.0/24 | 1 |
+ | | |
+ +------------------+----------+
+ | 192.168.105.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.106.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 192.168.55.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.56.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 192.168.175.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.176.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 192.168.200.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.201.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 192.168.240.0/24 | 2 |
+ | | |
+ +------------------+----------+
+ | 192.168.241.0/24 | 3 |
+ | | |
+ +------------------+----------+
+ | 1111:1111: | 0 |
+ | 1111:1111: | |
+ | 1111:1111: | |
+ | 1111:5555/116 | |
+ | | |
+ +------------------+----------+
+ | 1111:1111: | 1 |
+ | 1111:1111: | |
+ | 1111:1111: | |
+ | 1111:6666/116 | |
+ | | |
+ +------------------+----------+
+ | ffff:0000: | 0 |
+ | 1111:1111: | |
+ | 0000:0000: | |
+ | 0000:0000/116 | |
+ | | |
+ +------------------+----------+
+ | ffff:0000: | 1 |
+ | 1111:1111: | |
+ | 1111:1111: | |
+ | 0000:0000/116 | |
+ | | |
+ +------------------+----------+
+ | 0000:0000: | 2 |
+ | 0000:0000: | |
+ | aaaa:aaaa: | |
+ | 0000:0/116 | |
+ | | |
+ +------------------+----------+
+ | 0000:0000: | 3 |
+ | 0000:0000: | |
+ | bbbb:bbbb: | |
+ | 0000:0/116 | |
+ | | |
+ +------------------+----------+
+ | 0000:0000: | 2 |
+ | 0000:0000: | |
+ | 5555:5555: | |
+ | 0000:0/116 | |
+ | | |
+ +------------------+----------+
+ | 0000:0000: | 3 |
+ | 0000:0000: | |
+ | 6666:6666: | |
+ | 0000:0/116 | |
+ | | |
+ +------------------+----------+
+ | 0000:0000: | 2 |
+ | 1111:1111: | |
+ | 0000:0000: | |
+ | 0000:0000/116 | |
+ | | |
+ +------------------+----------+
+ | 0000:0000: | 3 |
+ | 1111:1111: | |
+ | 1111:1111: | |
+ | 0000:0000/116 | |
+ | | |
+ +------------------+----------+
diff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst
index 67ea944e..72da8c42 100644
--- a/doc/guides/sample_app_ug/ipv4_multicast.rst
+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst
@@ -193,7 +193,7 @@ Firstly, the Ethernet* header is removed from the packet and the IPv4 address is
/* Remove the Ethernet header from the input packet */
iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, sizeof(struct ether_hdr));
- RTE_MBUF_ASSERT(iphdr != NULL);
+ RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
Then, the packet is checked to see if it has a multicast destination address and
@@ -271,7 +271,7 @@ The actual packet transmission is done in the mcast_send_pkt() function:
ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t) sizeof(*ethdr));
- RTE_MBUF_ASSERT(ethdr != NULL);
+ RTE_ASSERT(ethdr != NULL);
ether_addr_copy(dest_addr, &ethdr->d_addr);
ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
diff --git a/doc/guides/sample_app_ug/l2_forward_crypto.rst b/doc/guides/sample_app_ug/l2_forward_crypto.rst
index 7cce51be..723376c5 100644
--- a/doc/guides/sample_app_ug/l2_forward_crypto.rst
+++ b/doc/guides/sample_app_ug/l2_forward_crypto.rst
@@ -167,11 +167,11 @@ To run the application in linuxapp environment with 2 lcores, 2 ports and 2 cryp
.. code-block:: console
- $ ./build/l2fwd -c 0x3 -n 4 --vdev "cryptodev_aesni_mb_pmd" \
+ $ ./build/l2fwd-crypto -c 0x3 -n 4 --vdev "cryptodev_aesni_mb_pmd" \
--vdev "cryptodev_aesni_mb_pmd" -- -p 0x3 --chain CIPHER_HASH \
--cipher_op ENCRYPT --cipher_algo AES_CBC \
--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
- --auth_op GENERATE --auth_algo SHA1_HMAC \
+ --auth_op GENERATE --auth_algo AES_XCBC_MAC \
--auth_key 10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f
Refer to the *DPDK Getting Started Guide* for general information on running applications
diff --git a/doc/guides/sample_app_ug/l2_forward_job_stats.rst b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
index 03d99779..2444e36e 100644
--- a/doc/guides/sample_app_ug/l2_forward_job_stats.rst
+++ b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
@@ -238,9 +238,6 @@ in the *DPDK Programmer's Guide* and the *DPDK API Reference*.
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
diff --git a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
index e77d67c7..b51b2dc9 100644
--- a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
+++ b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
@@ -242,9 +242,6 @@ in the *DPDK Programmer's Guide* - Rel 1.4 EAR and the *DPDK API Reference*.
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
diff --git a/doc/guides/sample_app_ug/l3_forward.rst b/doc/guides/sample_app_ug/l3_forward.rst
index 3e070d0b..491f99d9 100644
--- a/doc/guides/sample_app_ug/l3_forward.rst
+++ b/doc/guides/sample_app_ug/l3_forward.rst
@@ -324,7 +324,7 @@ The key code snippet of simple_ipv4_fwd_4pkts() is shown below:
const void *key_array[4] = {&key[0], &key[1], &key[2],&key[3]};
- rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 4, ret);
+ rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0], 4, ret);
dst_port[0] = (ret[0] < 0)? portid:ipv4_l3fwd_out_if[ret[0]];
dst_port[1] = (ret[1] < 0)? portid:ipv4_l3fwd_out_if[ret[1]];
diff --git a/doc/guides/sample_app_ug/link_status_intr.rst b/doc/guides/sample_app_ug/link_status_intr.rst
index a4dbb545..779df97d 100644
--- a/doc/guides/sample_app_ug/link_status_intr.rst
+++ b/doc/guides/sample_app_ug/link_status_intr.rst
@@ -145,9 +145,6 @@ To fully understand this code, it is recommended to study the chapters that rela
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/*
* Each logical core is assigned a dedicated TX queue on each port.
*/
diff --git a/doc/guides/sample_app_ug/pdump.rst b/doc/guides/sample_app_ug/pdump.rst
new file mode 100644
index 00000000..96c8709e
--- /dev/null
+++ b/doc/guides/sample_app_ug/pdump.rst
@@ -0,0 +1,122 @@
+
+.. BSD LICENSE
+ Copyright(c) 2016 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+dpdk_pdump Application
+======================
+
+The ``dpdk_pdump`` application is a Data Plane Development Kit (DPDK) application that runs as a DPDK secondary process and
+is capable of enabling packet capture on DPDK ports.
+
+
+Running the Application
+-----------------------
+
+The application has a ``--pdump`` command line option with various sub arguments:
+
+.. code-block:: console
+
+ ./build/app/dpdk_pdump --
+ --pdump '(port=<port id> | device_id=<pci id or vdev name>),
+ (queue=<queue_id>),
+ (rx-dev=<iface or pcap file> |
+ tx-dev=<iface or pcap file>),
+ [ring-size=<ring size>],
+ [mbuf-size=<mbuf data size>],
+ [total-num-mbufs=<number of mbufs>]'
+
+Note:
+
+* Parameters inside the parentheses represent mandatory parameters.
+
+* Parameters inside the square brackets represent optional parameters.
+
+Multiple instances of ``--pdump`` can be passed to capture packets on different port and queue combinations.
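+
+For example, two instances could be passed to capture two ports at once (the port ids and
+file names below are illustrative):
+
+.. code-block:: console
+
+   ./build/app/dpdk_pdump -- --pdump 'port=0,queue=*,rx-dev=/tmp/rx-0.pcap' \
+                             --pdump 'port=1,queue=*,rx-dev=/tmp/rx-1.pcap'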
+
+
+Parameters
+~~~~~~~~~~
+
+``port``:
+Port id of the eth device on which packets should be captured.
+
+``device_id``:
+PCI address or name of the eth device on which packets should be captured.
+
+ .. Note::
+
+      * As of now the ``dpdk_pdump`` tool cannot capture the packets of virtual devices
+        in the primary process due to a bug in the ethdev library. Because of this bug, in a multi-process context,
+        when the primary and secondary processes have different sets of ports, the secondary process
+        (here the ``dpdk_pdump`` tool) overwrites the ``rte_eth_devices[]`` entries of the primary process.
+
+``queue``:
+Queue id of the eth device on which packets should be captured. The user can pass a queue value of ``*`` to enable
+packet capture on all queues of the eth device.
+
+``rx-dev``:
+Can be either a pcap file name or any Linux iface.
+
+``tx-dev``:
+Can be either a pcap file name or any Linux iface.
+
+ .. Note::
+
+ * To receive ingress packets only, ``rx-dev`` should be passed.
+
+ * To receive egress packets only, ``tx-dev`` should be passed.
+
+   * To receive ingress and egress packets separately, ``rx-dev`` and ``tx-dev``
+     should both be passed with different file names or Linux iface names.
+
+   * To receive ingress and egress packets together, ``rx-dev`` and ``tx-dev``
+     should both be passed with the same file name or the same Linux iface name.
+
+``ring-size``:
+Size of the ring. This value is used internally for ring creation. The ring is used to enqueue the packets from
+the primary application to the secondary process. This is an optional parameter; the default size is 16384.
+
+``mbuf-size``:
+Size of the mbuf data. This is used internally for mempool creation. Ideally this value should be the same as
+the mbuf data size of the primary application's mempool used for packet RX. This is an optional parameter;
+the default size is 2176.
+
+``total-num-mbufs``:
+Total number of mbufs in the mempool. This is used internally for mempool creation. This is an optional parameter;
+the default value is 65535.
+
+
+Example
+-------
+
+.. code-block:: console
+
+ $ sudo ./build/app/dpdk_pdump -- --pdump 'port=0,queue=*,rx-dev=/tmp/rx.pcap'
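+
+Optional parameters can be combined with the mandatory ones; for example, to capture both
+directions into separate files with a larger ring (the values below are illustrative):
+
+.. code-block:: console
+
+   $ sudo ./build/app/dpdk_pdump -- \
+       --pdump 'port=0,queue=*,rx-dev=/tmp/rx.pcap,tx-dev=/tmp/tx.pcap,ring-size=32768'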
diff --git a/doc/guides/sample_app_ug/vhost.rst b/doc/guides/sample_app_ug/vhost.rst
index 47ce36ce..a93e54d8 100644
--- a/doc/guides/sample_app_ug/vhost.rst
+++ b/doc/guides/sample_app_ug/vhost.rst
@@ -491,39 +491,9 @@ The default value is 15.
-- --rx-retry 1 --rx-retry-delay 20
**Zero copy.**
-The zero copy option enables/disables the zero copy mode for RX/TX packet,
-in the zero copy mode the packet buffer address from guest translate into host physical address
-and then set directly as DMA address.
-If the zero copy mode is disabled, then one copy mode is utilized in the sample.
-This option is disabled by default.
-
-.. code-block:: console
-
- ./vhost-switch -c f -n 4 --socket-mem 1024 --huge-dir /mnt/huge \
- -- --zero-copy [0,1]
-
-**RX descriptor number.**
-The RX descriptor number option specify the Ethernet RX descriptor number,
-Linux legacy virtio-net has different behavior in how to use the vring descriptor from DPDK based virtio-net PMD,
-the former likely allocate half for virtio header, another half for frame buffer,
-while the latter allocate all for frame buffer,
-this lead to different number for available frame buffer in vring,
-and then lead to different Ethernet RX descriptor number could be used in zero copy mode.
-So it is valid only in zero copy mode is enabled. The value is 32 by default.
-
-.. code-block:: console
-
- ./vhost-switch -c f -n 4 --socket-mem 1024 --huge-dir /mnt/huge \
- -- --zero-copy 1 --rx-desc-num [0, n]
-
-**TX descriptor number.**
-The TX descriptor number option specify the Ethernet TX descriptor number, it is valid only in zero copy mode is enabled.
-The value is 64 by default.
-
-.. code-block:: console
-
- ./vhost-switch -c f -n 4 --socket-mem 1024 --huge-dir /mnt/huge \
- -- --zero-copy 1 --tx-desc-num [0, n]
+Zero copy mode has been removed because it had not been working for a while,
+and the code was large and complex enough that redesigning it is preferable to
+fixing it to make it work again. Hence, zero copy may be added back later.
**VLAN strip.**
The VLAN strip option enable/disable the VLAN strip on host, if disabled, the guest will receive the packets with VLAN tag.
@@ -863,3 +833,20 @@ For example:
The above message indicates that device 0 has been registered with MAC address cc:bb:bb:bb:bb:bb and VLAN tag 1000.
Any packets received on the NIC with these values is placed on the devices receive queue.
When a virtio-net device transmits packets, the VLAN tag is added to the packet by the DPDK vhost sample code.
+
+Running virtio-user with vhost-switch
+-------------------------------------
+
+We can also use virtio-user with vhost-switch now.
+Virtio-user is a virtual device that can be run in an application (container) in parallel with vhost in the same OS,
+that is, there is no need to start a VM. We just run it with a different ``--file-prefix`` to avoid startup failure.
+
+.. code-block:: console
+
+ cd ${RTE_SDK}/x86_64-native-linuxapp-gcc/app
+ ./testpmd -c 0x3 -n 4 --socket-mem 1024 --no-pci --file-prefix=virtio-user-testpmd \
+ --vdev=virtio-user0,mac=00:01:02:03:04:05,path=$path_vhost \
+ -- -i --txqflags=0xf01 --disable-hw-vlan
+
+There is no difference on the vhost side.
+Please note that there are some limitations in the usage of virtio-user (see the release notes for more information).
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index f6055645..7712bd24 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -289,6 +289,10 @@ The commandline options are:
Enable hardware RX checksum offload.
+* ``--enable-scatter``
+
+ Enable scatter (multi-segment) RX.
+
* ``--disable-hw-vlan``
Disable hardware VLAN.
@@ -329,7 +333,6 @@ The commandline options are:
io (the default)
mac
- mac_retry
mac_swap
flowgen
rxonly
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index aed5e477..30e410dd 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -98,9 +98,11 @@ Start packet forwarding with current configuration::
start tx_first
~~~~~~~~~~~~~~
-Start packet forwarding with current configuration after sending one burst of packets::
+Start packet forwarding with current configuration after sending a specified number of bursts of packets::
- testpmd> start tx_first
+ testpmd> start tx_first (""|burst_num)
+
+The default burst number is 1 when ``burst_num`` is not specified.
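+
+For example, to send 5 bursts of packets before forwarding starts (the burst count below is
+illustrative)::
+
+   testpmd> start tx_first 5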
stop
~~~~
@@ -177,6 +179,10 @@ For example:
ipv6-sctp
ipv6-other
l2_payload
+ port
+ vxlan
+ geneve
+ nvgre
show port rss reta
~~~~~~~~~~~~~~~~~~
@@ -249,8 +255,10 @@ set fwd
Set the packet forwarding mode::
- testpmd> set fwd (io|mac|mac_retry|macswap|flowgen| \
- rxonly|txonly|csum|icmpecho)
+ testpmd> set fwd (io|mac|macswap|flowgen| \
+ rxonly|txonly|csum|icmpecho) (""|retry)
+
+``retry`` can be specified for all forwarding engines except ``rxonly``.
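+
+For example, to enable the retry behaviour on top of ``mac`` forwarding::
+
+   testpmd> set fwd mac retry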
The available information categories are:
@@ -260,8 +268,6 @@ The available information categories are:
* ``mac``: Changes the source and the destination Ethernet addresses of packets before forwarding them.
-* ``mac_retry``: Same as "mac" forwarding mode, but includes retries if the destination queue is full.
-
* ``macswap``: MAC swap forwarding mode.
Swaps the source and the destination Ethernet addresses of packets before forwarding them.
@@ -392,9 +398,9 @@ Set number of packets per burst::
This is equivalent to the ``--burst command-line`` option.
-In ``mac_retry`` forwarding mode, the transmit delay time and number of retries can also be set::
+When retry is enabled, the transmit delay time and number of retries can also be set::
- testpmd> set burst tx delay (micrseconds) retry (num)
+ testpmd> set burst tx delay (microseconds) retry (num)
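+
+For example (the delay and retry count below are illustrative values, not recommendations)::
+
+   testpmd> set burst tx delay 500000 retry 100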
set txpkts
~~~~~~~~~~
@@ -980,7 +986,9 @@ The following sections show functions for configuring ports.
port attach
~~~~~~~~~~~
-Attach a port specified by pci address or virtual device args.
+Attach a port specified by pci address or virtual device args::
+
+ testpmd> port attach (identifier)
To attach a new pci device, the device should be recognized by kernel first.
Then it should be moved under DPDK management.
@@ -1014,8 +1022,6 @@ For example, to move a pci device using ixgbe under DPDK management:
To attach a port created by virtual device, above steps are not needed.
-port attach (identifier)
-
For example, to attach a port whose pci address is 0000:0a:00.0.
.. code-block:: console
@@ -1061,16 +1067,19 @@ the mode and slave parameters must be given.
port detach
~~~~~~~~~~~
-Detach a specific port.
-
-Before detaching a port, the port should be closed::
+Detach a specific port::
testpmd> port detach (port_id)
+Before detaching a port, the port should be stopped and closed.
+
For example, to detach a pci device port 0.
.. code-block:: console
+ testpmd> port stop 0
+ Stopping ports...
+ Done
testpmd> port close 0
Closing ports...
Done
@@ -1088,6 +1097,9 @@ For example, to detach a virtual device port 0.
.. code-block:: console
+ testpmd> port stop 0
+ Stopping ports...
+ Done
testpmd> port close 0
Closing ports...
Done
@@ -1187,6 +1199,26 @@ CRC stripping is off by default.
The ``on`` option is equivalent to the ``--crc-strip`` command-line option.
+port config - scatter
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Set RX scatter mode on or off for all ports::
+
+ testpmd> port config all scatter (on|off)
+
+RX scatter mode is off by default.
+
+The ``on`` option is equivalent to the ``--enable-scatter`` command-line option.
+
+port config - TX queue flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set a hexadecimal bitmap of TX queue flags for all ports::
+
+ testpmd> port config all txqflags value
+
+This command is equivalent to the ``--txqflags`` command-line option.
+
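+For example (the bitmap value below is illustrative)::
+
+   testpmd> port config all txqflags 0xf01
+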
port config - RX Checksum
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1258,7 +1290,7 @@ port config - RSS
Set the RSS (Receive Side Scaling) mode on or off::
- testpmd> port config all rss (all|ip|tcp|udp|sctp|ether|none)
+ testpmd> port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none)
RSS is on by default.
@@ -1782,13 +1814,13 @@ Different NICs may have different capabilities, command show port fdir (port_id)
For example, to add an ipv4-udp flow type filter::
- testpmd> flow_director_filter 0 add flow ipv4-udp src 2.2.2.3 32 \
+ testpmd> flow_director_filter 0 mode IP add flow ipv4-udp src 2.2.2.3 32 \
dst 2.2.2.5 33 tos 2 ttl 40 vlan 0x1 flexbytes (0x88,0x48) \
fwd pf queue 1 fd_id 1
For example, add an ipv4-other flow type filter::
- testpmd> flow_director_filter 0 add flow ipv4-other src 2.2.2.3 \
+ testpmd> flow_director_filter 0 mode IP add flow ipv4-other src 2.2.2.3 \
dst 2.2.2.5 tos 2 proto 20 ttl 40 vlan 0x1 \
flexbytes (0x88,0x48) fwd pf queue 1 fd_id 1
@@ -1821,7 +1853,7 @@ Set flow director's input masks::
Example, to set flow director mask on port 0::
- testpmd> flow_director_mask 0 vlan 0xefff \
+ testpmd> flow_director_mask 0 mode IP vlan 0xefff \
src_mask 255.255.255.255 \
FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF 0xFFFF \
dst_mask 255.255.255.255 \
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index b4205385..dc4ef7f9 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -35,6 +35,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index aa2621bd..5898cae1 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -30,9 +30,11 @@
include $(RTE_SDK)/mk/rte.vars.mk
+ifneq ($(MAKECMDGOALS),clean)
ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
endif
+endif
# library name
LIB = librte_pmd_aesni_gcm.a
@@ -47,9 +49,10 @@ LIBABIVER := 1
# versioning export map
EXPORT_MAP := rte_pmd_aesni_gcm_version.map
-# external library include paths
+# external library dependencies
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
LDLIBS += -lcrypto
# library source files
@@ -62,6 +65,8 @@ SYMLINK-y-include +=
# library dependencies
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_ring
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index ec65291f..d3994cc6 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -49,9 +49,10 @@ LIBABIVER := 1
# versioning export map
EXPORT_MAP := rte_pmd_aesni_version.map
-# external library include paths
+# external library dependencies
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
@@ -60,6 +61,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
# library dependencies
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_ring
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_cryptodev
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 3415ac1b..6554fc4e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -222,6 +222,9 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
case RTE_CRYPTO_CIPHER_AES_CBC:
sess->cipher.mode = CBC;
break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.mode = CNTR;
+ break;
default:
MB_LOG_ERR("Unsupported cipher mode parameter");
return -1;
@@ -379,9 +382,11 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
/* append space for output data to mbuf */
char *odata = rte_pktmbuf_append(m_dst,
rte_pktmbuf_data_len(op->sym->m_src));
- if (odata == NULL)
+ if (odata == NULL) {
MB_LOG_ERR("failed to allocate space in destination "
"mbuf for source data");
+ return NULL;
+ }
memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
rte_pktmbuf_data_len(op->sym->m_src));
@@ -560,7 +565,7 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
goto flush_jobs;
else
qp->stats.enqueued_count += processed_jobs;
- return i;
+ return i;
flush_jobs:
/*
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3806a66e..d3c46ace 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -207,6 +207,26 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
diff --git a/drivers/crypto/kasumi/Makefile b/drivers/crypto/kasumi/Makefile
new file mode 100644
index 00000000..9fb0be85
--- /dev/null
+++ b/drivers/crypto/kasumi/Makefile
@@ -0,0 +1,69 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_KASUMI_PATH),)
+$(error "Please define LIBSSO_KASUMI_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_kasumi.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_kasumi_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build
+LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
new file mode 100644
index 00000000..5f8c7a2e
--- /dev/null
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -0,0 +1,657 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+#define KASUMI_KEY_LENGTH 16
+#define KASUMI_IV_LENGTH 8
+#define KASUMI_DIGEST_LENGTH 4
+#define KASUMI_MAX_BURST 4
+#define BYTE_LEN 8
+
+/**
+ * Global static parameter used to create a unique name for each KASUMI
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_KASUMI_PMD,
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/** Get xform chain order. */
+static enum kasumi_operation
+kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return KASUMI_OP_NOT_SUPPORTED;
+
+	if (xform->next != NULL && xform->next->next != NULL)
+		return KASUMI_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return KASUMI_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return KASUMI_OP_AUTH_CIPHER;
+ else
+ return KASUMI_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return KASUMI_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return KASUMI_OP_CIPHER_AUTH;
+ else
+ return KASUMI_OP_NOT_SUPPORTED;
+ }
+
+ return KASUMI_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ int mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = kasumi_get_mode(xform);
+
+ switch (mode) {
+ case KASUMI_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+ /* Fall-through */
+ case KASUMI_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case KASUMI_OP_ONLY_AUTH:
+ auth_xform = xform;
+ }
+
+ if (mode == KASUMI_OP_NOT_SUPPORTED) {
+ KASUMI_LOG_ERR("Unsupported operation chain order parameter");
+ return -EINVAL;
+ }
+
+ if (cipher_xform) {
+ /* Only KASUMI F8 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
+ return -EINVAL;
+ /* Initialize key */
+		sso_kasumi_init_f8_key_sched(cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only KASUMI F9 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
+ return -EINVAL;
+ sess->auth_op = auth_xform->auth.op;
+ /* Initialize key */
+		sso_kasumi_init_f9_key_sched(auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get KASUMI session. */
+static struct kasumi_session *
+kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
+{
+ struct kasumi_session *sess;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_KASUMI_PMD))
+ return NULL;
+
+ sess = (struct kasumi_session *)op->sym->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct kasumi_session *)c_sess->_private;
+
+ if (unlikely(kasumi_set_session_parameters(sess,
+ op->sym->xform) != 0))
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_kasumi_cipher_op(struct rte_crypto_op **ops,
+ struct kasumi_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[num_ops], *dst[num_ops];
+ uint64_t IV[num_ops];
+ uint32_t num_bytes[num_ops];
+
+ for (i = 0; i < num_ops; i++) {
+ /* Sanity checks. */
+ if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("iv");
+ break;
+ }
+
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ IV[i] = *((uint64_t *)(ops[i]->sym->cipher.iv.data));
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ if (processed_ops != 0)
+ sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, IV,
+ src, dst, num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
+ struct kasumi_session *session)
+{
+ uint8_t *src, *dst;
+ uint64_t IV;
+ uint32_t length_in_bits, offset_in_bits;
+
+ /* Sanity checks. */
+ if (unlikely(op->sym->cipher.iv.length != KASUMI_IV_LENGTH)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("iv");
+ return 0;
+ }
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ dst = op->sym->m_dst ?
+ rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *) :
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ IV = *((uint64_t *)(op->sym->cipher.iv.data));
+ length_in_bits = op->sym->cipher.data.length;
+
+ sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_kasumi_hash_op(struct rte_crypto_op **ops,
+ struct kasumi_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+ uint32_t num_bytes;
+ uint32_t shift_bits;
+ uint64_t IV;
+ uint8_t direction;
+
+ for (i = 0; i < num_ops; i++) {
+ if (unlikely(ops[i]->sym->auth.aad.length != KASUMI_IV_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("aad");
+ break;
+ }
+
+ if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("digest");
+ break;
+ }
+
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+ /* IV from AAD */
+ IV = *((uint64_t *)(ops[i]->sym->auth.aad.data));
+ /* Direction from next bit after end of message */
+ num_bytes = (length_in_bits >> 3) + 1;
+ shift_bits = (BYTE_LEN - 1 - length_in_bits) % BYTE_LEN;
+ direction = (src[num_bytes - 1] >> shift_bits) & 0x01;
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+
+ sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
+ IV, src,
+ length_in_bits, dst, direction);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ops[i]->sym->auth.digest.length) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
+ IV, src,
+ length_in_bits, dst, direction);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which shares the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
+ struct kasumi_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (session->op) {
+ case KASUMI_OP_ONLY_CIPHER:
+ processed_ops = process_kasumi_cipher_op(ops,
+ session, num_ops);
+ break;
+ case KASUMI_OP_ONLY_AUTH:
+ processed_ops = process_kasumi_hash_op(ops, session,
+ num_ops);
+ break;
+ case KASUMI_OP_CIPHER_AUTH:
+ processed_ops = process_kasumi_cipher_op(ops, session,
+ num_ops);
+ process_kasumi_hash_op(ops, session, processed_ops);
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ processed_ops = process_kasumi_hash_op(ops, session,
+ num_ops);
+ process_kasumi_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
+ struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned enqueued_op, processed_op;
+
+ switch (session->op) {
+ case KASUMI_OP_ONLY_CIPHER:
+ processed_op = process_kasumi_cipher_op_bit(op,
+ session);
+ break;
+ case KASUMI_OP_ONLY_AUTH:
+ processed_op = process_kasumi_hash_op(&op, session, 1);
+ break;
+ case KASUMI_OP_CIPHER_AUTH:
+ processed_op = process_kasumi_cipher_op_bit(op, session);
+ if (processed_op == 1)
+ process_kasumi_hash_op(&op, session, 1);
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ processed_op = process_kasumi_hash_op(&op, session, 1);
+ if (processed_op == 1)
+ process_kasumi_cipher_op_bit(op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
+ processed_op);
+ qp->qp_stats.enqueued_count += enqueued_op;
+ *accumulated_enqueued_ops += enqueued_op;
+
+ return enqueued_op;
+}
+
+static uint16_t
+kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[nb_ops];
+ struct rte_crypto_op *curr_c_op;
+
+ struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
+ struct kasumi_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = kasumi_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone. */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == KASUMI_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+kasumi_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct kasumi_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_kasumi_uninit(const char *name);
+
+static int
+cryptodev_kasumi_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct kasumi_private *internals;
+ uint64_t cpu_flags = 0;
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ else {
+ KASUMI_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* Create a unique device name. */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ KASUMI_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct kasumi_private), init_params->socket_id);
+ if (dev == NULL) {
+ KASUMI_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_KASUMI_PMD;
+ dev->dev_ops = rte_kasumi_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = kasumi_pmd_dequeue_burst;
+ dev->enqueue_burst = kasumi_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+ KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", name);
+
+ cryptodev_kasumi_uninit(crypto_dev_name);
+ return -EFAULT;
+}
+
+static int
+cryptodev_kasumi_init(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_kasumi_create(name, &init_params);
+}
+
+static int
+cryptodev_kasumi_uninit(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing KASUMI crypto device %s"
+ " on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_driver cryptodev_kasumi_pmd_drv = {
+ .name = CRYPTODEV_NAME_KASUMI_PMD,
+ .type = PMD_VDEV,
+ .init = cryptodev_kasumi_init,
+ .uninit = cryptodev_kasumi_uninit
+};
+
+PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv);
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
new file mode 100644
index 00000000..da5854eb
--- /dev/null
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -0,0 +1,344 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 9,
+ .max = 9,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+kasumi_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+kasumi_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+kasumi_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct kasumi_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = kasumi_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ rte_ring_free(qp->processed_ops);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct kasumi_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "kasumi_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size == ring_size) {
+ KASUMI_LOG_INFO("Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ KASUMI_LOG_ERR("Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct kasumi_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ kasumi_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (kasumi_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+kasumi_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the KASUMI session structure */
+static unsigned
+kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct kasumi_session);
+}
+
+/** Configure a KASUMI session from a crypto xform chain */
+static void *
+kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ KASUMI_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (kasumi_set_session_parameters(sess, xform) != 0) {
+ KASUMI_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+kasumi_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+	 * Currently just resetting the whole data structure; need to investigate
+	 * whether a more selective reset of the key would be more performant.
+ */
+ if (sess)
+ memset(sess, 0, sizeof(struct kasumi_session));
+}
+
+struct rte_cryptodev_ops kasumi_pmd_ops = {
+ .dev_configure = kasumi_pmd_config,
+ .dev_start = kasumi_pmd_start,
+ .dev_stop = kasumi_pmd_stop,
+ .dev_close = kasumi_pmd_close,
+
+ .stats_get = kasumi_pmd_stats_get,
+ .stats_reset = kasumi_pmd_stats_reset,
+
+ .dev_infos_get = kasumi_pmd_info_get,
+
+ .queue_pair_setup = kasumi_pmd_qp_setup,
+ .queue_pair_release = kasumi_pmd_qp_release,
+ .queue_pair_start = kasumi_pmd_qp_start,
+ .queue_pair_stop = kasumi_pmd_qp_stop,
+ .queue_pair_count = kasumi_pmd_qp_count,
+
+ .session_get_size = kasumi_pmd_session_get_size,
+ .session_configure = kasumi_pmd_session_configure,
+ .session_clear = kasumi_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
new file mode 100644
index 00000000..04e1c437
--- /dev/null
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
+#define _RTE_KASUMI_PMD_PRIVATE_H_
+
+#include <sso_kasumi.h>
+
+#define KASUMI_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_KASUMI_PMD, \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_KASUMI_DEBUG
+#define KASUMI_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_KASUMI_PMD, \
+ __func__, __LINE__, ## args)
+
+#define KASUMI_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_KASUMI_PMD, \
+ __func__, __LINE__, ## args)
+#else
+#define KASUMI_LOG_INFO(fmt, args...)
+#define KASUMI_LOG_DBG(fmt, args...)
+#endif
+
+/** private data structure for each virtual KASUMI device */
+struct kasumi_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** KASUMI buffer queue pair */
+struct kasumi_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+enum kasumi_operation {
+ KASUMI_OP_ONLY_CIPHER,
+ KASUMI_OP_ONLY_AUTH,
+ KASUMI_OP_CIPHER_AUTH,
+ KASUMI_OP_AUTH_CIPHER,
+ KASUMI_OP_NOT_SUPPORTED
+};
+
+/** KASUMI private session structure */
+struct kasumi_session {
+ /* Keys have to be 16-byte aligned */
+ sso_kasumi_key_sched_t pKeySched_cipher;
+ sso_kasumi_key_sched_t pKeySched_hash;
+ enum kasumi_operation op;
+ enum rte_crypto_auth_operation auth_op;
+} __rte_cache_aligned;
+
+
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
+
+#endif /* _RTE_KASUMI_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/kasumi/rte_pmd_kasumi_version.map b/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
new file mode 100644
index 00000000..8ffeca93
--- /dev/null
+++ b/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
@@ -0,0 +1,3 @@
+DPDK_16.07 {
+ local: *;
+};
diff --git a/drivers/crypto/null/Makefile b/drivers/crypto/null/Makefile
index 2173277b..c143929f 100644
--- a/drivers/crypto/null/Makefile
+++ b/drivers/crypto/null/Makefile
@@ -54,6 +54,8 @@ SYMLINK-y-include +=
# library dependencies
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_ring
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_cryptodev
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 258c2d5f..20a70d4a 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -38,6 +38,7 @@ LIBABIVER := 1
# build flags
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
# external library include paths
CFLAGS += -I$(SRCDIR)/qat_adf
@@ -58,6 +59,7 @@ EXPORT_MAP := rte_pmd_qat_version.map
# library dependencies
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_cryptodev
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index bcccdf4f..aa108d47 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -616,10 +616,11 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
* Write (the length of AAD) into bytes 16-19 of state2
* in big-endian format. This field is 8 bytes
*/
- *(uint32_t *)&(hash->sha.state1[
+ uint32_t *aad_len = (uint32_t *)&hash->sha.state1[
ICP_QAT_HW_GALOIS_128_STATE1_SZ +
- ICP_QAT_HW_GALOIS_H_SZ]) =
- rte_bswap32(add_auth_data_length);
+ ICP_QAT_HW_GALOIS_H_SZ];
+ *aad_len = rte_bswap32(add_auth_data_length);
+
proto = ICP_QAT_FW_LA_GCM_PROTO;
} else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 495ea1c7..940b2b63 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -263,6 +263,26 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -276,14 +296,15 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
{
struct qat_session *sess = session;
- phys_addr_t cd_paddr = sess->cd_paddr;
+ phys_addr_t cd_paddr;
PMD_INIT_FUNC_TRACE();
if (session) {
+ cd_paddr = sess->cd_paddr;
memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
-
sess->cd_paddr = cd_paddr;
- }
+ } else
+ PMD_DRV_LOG(ERR, "NULL session");
}
static int
@@ -368,6 +389,14 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
@@ -380,7 +409,6 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_CTR:
case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
@@ -807,12 +835,15 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
return data - mult;
}
-void qat_crypto_sym_session_init(struct rte_mempool *mp, void *priv_sess)
+void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
{
- struct qat_session *s = priv_sess;
+ struct rte_cryptodev_sym_session *sess = sym_sess;
+ struct qat_session *s = (void *)sess->_private;
PMD_INIT_FUNC_TRACE();
- s->cd_paddr = rte_mempool_virt2phy(mp, &s->cd);
+ s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
+ offsetof(struct qat_session, cd) +
+ offsetof(struct rte_cryptodev_sym_session, _private);
}
int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index a7912f5a..f46ec857 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -69,10 +69,7 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
static struct rte_pci_id pci_id_qat_map[] = {
{
- .vendor_id = 0x8086,
- .device_id = 0x0443,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(0x8086, 0x0443),
},
{.device_id = 0},
};
diff --git a/drivers/crypto/snow3g/Makefile b/drivers/crypto/snow3g/Makefile
index ee582702..bea6760b 100644
--- a/drivers/crypto/snow3g/Makefile
+++ b/drivers/crypto/snow3g/Makefile
@@ -30,8 +30,10 @@
include $(RTE_SDK)/mk/rte.vars.mk
-ifeq ($(LIBSSO_PATH),)
-$(error "Please define LIBSSO_PATH environment variable")
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_SNOW3G_PATH),)
+$(error "Please define LIBSSO_SNOW3G_PATH environment variable")
+endif
endif
# library name
@@ -47,10 +49,11 @@ LIBABIVER := 1
# versioning export map
EXPORT_MAP := rte_pmd_snow3g_version.map
-# external library include paths
-CFLAGS += -I$(LIBSSO_PATH)
-CFLAGS += -I$(LIBSSO_PATH)/include
-CFLAGS += -I$(LIBSSO_PATH)/build
+# external library dependencies
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/include
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/build
+LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
@@ -59,6 +62,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd_ops.c
# library dependencies
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_ring
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_cryptodev
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index f3e0e667..dc8de6bd 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -38,10 +38,11 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_kvargs.h>
#include "rte_snow3g_pmd_private.h"
+#define SNOW3G_IV_LENGTH 16
+#define SNOW3G_DIGEST_LENGTH 4
#define SNOW3G_MAX_BURST 8
#define BYTE_LEN 8
@@ -198,20 +199,12 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
for (i = 0; i < num_ops; i++) {
/* Sanity checks. */
- if (ops[i]->sym->cipher.iv.length != 16) {
+ if (unlikely(ops[i]->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
SNOW3G_LOG_ERR("iv");
break;
}
- if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->cipher.data.offset
- % BYTE_LEN) != 0)) {
- ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("Data Length or offset");
- break;
- }
-
src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
dst[i] = ops[i]->sym->m_dst ?
@@ -231,6 +224,39 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
return processed_ops;
}
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
+ struct snow3g_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *IV;
+ uint32_t length_in_bits, offset_in_bits;
+
+ /* Sanity checks. */
+ if (unlikely(op->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("iv");
+ return 0;
+ }
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ if (op->sym->m_dst == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("bit-level in-place not supported\n");
+ return 0;
+ }
+ dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+ IV = op->sym->cipher.iv.data;
+ length_in_bits = op->sym->cipher.data.length;
+
+ sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
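Note the asymmetry with the byte-level path above: there the source pointer is pre-advanced by (offset >> 3), while the bit-level entry point receives the untranslated buffer plus the raw bit offset, since a pointer cannot express the sub-byte remainder. A toy illustration:

#include <stdint.h>

/* With offset_in_bits == 12, the byte path starts at buf + 1 and loses
 * the remaining 4 bits; the bit path hands all 12 bits to the library. */
static const uint8_t *
byte_path_start(const uint8_t *buf, uint32_t offset_in_bits)
{
	return buf + (offset_in_bits >> 3);
}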
+
/** Generate/verify hash from mbufs with same hash key. */
static int
process_snow3g_hash_op(struct rte_crypto_op **ops,
@@ -243,23 +269,22 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
uint32_t length_in_bits;
for (i = 0; i < num_ops; i++) {
- if (ops[i]->sym->auth.aad.length != 16) {
+ if (unlikely(ops[i]->sym->auth.aad.length != SNOW3G_IV_LENGTH)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
SNOW3G_LOG_ERR("aad");
break;
}
- if (ops[i]->sym->auth.digest.length != 4) {
+ if (unlikely(ops[i]->sym->auth.digest.length != SNOW3G_DIGEST_LENGTH)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
SNOW3G_LOG_ERR("digest");
break;
}
- if (((ops[i]->sym->auth.data.length % BYTE_LEN) != 0)
- || ((ops[i]->sym->auth.data.offset
- % BYTE_LEN) != 0)) {
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("Data Length or offset");
+ SNOW3G_LOG_ERR("Offset");
break;
}
@@ -299,10 +324,11 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
/** Process a batch of crypto ops which shares the same session. */
static int
process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
- struct snow3g_qp *qp, uint8_t num_ops)
+ struct snow3g_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
{
unsigned i;
- unsigned processed_ops;
+ unsigned enqueued_ops, processed_ops;
switch (session->op) {
case SNOW3G_OP_ONLY_CIPHER:
@@ -342,7 +368,63 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
}
}
- return processed_ops;
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
+ struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned enqueued_op, processed_op;
+
+ switch (session->op) {
+ case SNOW3G_OP_ONLY_CIPHER:
+ processed_op = process_snow3g_cipher_op_bit(op,
+ session);
+ break;
+ case SNOW3G_OP_ONLY_AUTH:
+ processed_op = process_snow3g_hash_op(&op, session, 1);
+ break;
+ case SNOW3G_OP_CIPHER_AUTH:
+ processed_op = process_snow3g_cipher_op_bit(op, session);
+ if (processed_op == 1)
+ process_snow3g_hash_op(&op, session, 1);
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ processed_op = process_snow3g_hash_op(&op, session, 1);
+ if (processed_op == 1)
+ process_snow3g_cipher_op_bit(op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)&op, processed_op);
+ qp->qp_stats.enqueued_count += enqueued_op;
+ *accumulated_enqueued_ops += enqueued_op;
+
+ return enqueued_op;
}
static uint16_t
@@ -354,7 +436,7 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
struct snow3g_qp *qp = queue_pair;
- unsigned i, n;
+ unsigned i;
uint8_t burst_size = 0;
uint16_t enqueued_ops = 0;
uint8_t processed_ops;
@@ -370,8 +452,32 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
curr_c_op->status =
RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
- return enqueued_ops;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone. */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((curr_c_op->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
}
/* Batch ops that share the same session. */
@@ -385,20 +491,14 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
* process them, and start a new batch.
*/
if (burst_size == SNOW3G_MAX_BURST) {
- processed_ops = process_ops(c_ops,
- prev_sess, qp, burst_size);
- n = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)c_ops,
- processed_ops);
- qp->qp_stats.enqueued_count += n;
- enqueued_ops += n;
- if (n < burst_size) {
- qp->qp_stats.enqueue_err_count +=
- nb_ops - enqueued_ops;
- return enqueued_ops;
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
}
- burst_size = 0;
+ burst_size = 0;
prev_sess = NULL;
}
} else {
@@ -406,41 +506,27 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
* Different session, process the ops
* of the previous session.
*/
- processed_ops = process_ops(c_ops,
- prev_sess, qp, burst_size);
- n = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)c_ops,
- processed_ops);
- qp->qp_stats.enqueued_count += n;
- enqueued_ops += n;
- if (n < burst_size) {
- qp->qp_stats.enqueue_err_count +=
- nb_ops - enqueued_ops;
- return enqueued_ops;
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
}
- burst_size = 0;
+ burst_size = 0;
prev_sess = curr_sess;
+
c_ops[burst_size++] = curr_c_op;
}
}
if (burst_size != 0) {
/* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops,
- prev_sess, qp, burst_size);
- n = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)c_ops,
- processed_ops);
- qp->qp_stats.enqueued_count += n;
- enqueued_ops += n;
- if (n < burst_size) {
- qp->qp_stats.enqueue_err_count +=
- nb_ops - enqueued_ops;
- return enqueued_ops;
- }
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
}
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
return enqueued_ops;
}
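The reworked loop is a small batching state machine: ops sharing a session accumulate up to SNOW3G_MAX_BURST, and a session change, a full batch, or a bit-level op flushes the pending batch first. A condensed, compilable sketch with stubbed helpers (all names hypothetical; the short-enqueue early exit is elided):

#include <stdio.h>

#define MAX_BURST 8

struct op { int session; int bit_level; };

static void
flush(struct op **batch, int *n)
{
	if (*n) {
		printf("process batch of %d (session %d)\n",
		       *n, batch[0]->session);
		*n = 0;
	}
}

static void
enqueue(struct op **ops, int nb_ops)
{
	struct op *batch[MAX_BURST];
	int n = 0, prev = -1, i;

	for (i = 0; i < nb_ops; i++) {
		if (ops[i]->bit_level) {
			flush(batch, &n);	/* drain pending batch */
			printf("process bit-level op alone\n");
			prev = -1;
			continue;
		}
		if (n == MAX_BURST || (n && ops[i]->session != prev))
			flush(batch, &n);
		batch[n++] = ops[i];
		prev = ops[i]->session;
	}
	flush(batch, &n);			/* last partial batch */
}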
@@ -468,6 +554,16 @@ cryptodev_snow3g_create(const char *name,
struct rte_cryptodev *dev;
char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct snow3g_private *internals;
+ uint64_t cpu_flags = 0;
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ else {
+ SNOW3G_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
/* Create a unique device name. */
if (create_unique_device_name(crypto_dev_name,
@@ -491,7 +587,8 @@ cryptodev_snow3g_create(const char *name,
dev->enqueue_burst = snow3g_pmd_enqueue_burst;
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
internals = dev->data->dev_private;
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3386a673..bc932309 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -45,10 +45,13 @@ DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
DIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
+DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += pcap
+DIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede
DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += ring
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
+DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx
DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt
diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile
index cb1a7aea..e14d6d0c 100644
--- a/drivers/net/af_packet/Makefile
+++ b/drivers/net/af_packet/Makefile
@@ -51,7 +51,9 @@ CFLAGS += $(WERROR_FLAGS)
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c
# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_kvargs
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index f17bd7e7..2d7f3448 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -78,6 +78,7 @@ struct pkt_rx_queue {
volatile unsigned long rx_pkts;
volatile unsigned long err_pkts;
+ volatile unsigned long rx_bytes;
};
struct pkt_tx_queue {
@@ -90,6 +91,7 @@ struct pkt_tx_queue {
volatile unsigned long tx_pkts;
volatile unsigned long err_pkts;
+ volatile unsigned long tx_bytes;
};
struct pmd_internals {
@@ -131,6 +133,7 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint8_t *pbuf;
struct pkt_rx_queue *pkt_q = queue;
uint16_t num_rx = 0;
+ unsigned long num_rx_bytes = 0;
unsigned int framecount, framenum;
if (unlikely(nb_pkts == 0))
@@ -167,9 +170,11 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* account for the receive frame */
bufs[i] = mbuf;
num_rx++;
+ num_rx_bytes += mbuf->pkt_len;
}
pkt_q->framenum = framenum;
pkt_q->rx_pkts += num_rx;
+ pkt_q->rx_bytes += num_rx_bytes;
return num_rx;
}
@@ -186,6 +191,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct pollfd pfd;
struct pkt_tx_queue *pkt_q = queue;
uint16_t num_tx = 0;
+ unsigned long num_tx_bytes = 0;
int i;
if (unlikely(nb_pkts == 0))
@@ -219,6 +225,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
num_tx++;
+ num_tx_bytes += mbuf->pkt_len;
rte_pktmbuf_free(mbuf);
}
@@ -229,6 +236,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
pkt_q->framenum = framenum;
pkt_q->tx_pkts += num_tx;
pkt_q->err_pkts += nb_pkts - num_tx;
+ pkt_q->tx_bytes += num_tx_bytes;
return num_tx;
}
@@ -287,13 +295,16 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
unsigned i, imax;
unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
const struct pmd_internals *internal = dev->data->dev_private;
imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (i = 0; i < imax; i++) {
igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
+ igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
rx_total += igb_stats->q_ipackets[i];
+ rx_bytes_total += igb_stats->q_ibytes[i];
}
imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
@@ -301,13 +312,17 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
for (i = 0; i < imax; i++) {
igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
+ igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
tx_total += igb_stats->q_opackets[i];
tx_err_total += igb_stats->q_errors[i];
+ tx_bytes_total += igb_stats->q_obytes[i];
}
igb_stats->ipackets = rx_total;
+ igb_stats->ibytes = rx_bytes_total;
igb_stats->opackets = tx_total;
igb_stats->oerrors = tx_err_total;
+ igb_stats->obytes = tx_bytes_total;
}
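With this change the af_packet PMD also reports byte counters, visible through the generic stats API. A usage sketch (error handling elided; port_id is assumed to be a valid, started port):

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
print_port_bytes(uint8_t port_id)
{
	struct rte_eth_stats stats;

	rte_eth_stats_get(port_id, &stats);
	printf("rx %" PRIu64 " bytes, tx %" PRIu64 " bytes\n",
	       stats.ibytes, stats.obytes);
}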
static void
@@ -316,12 +331,15 @@ eth_stats_reset(struct rte_eth_dev *dev)
unsigned i;
struct pmd_internals *internal = dev->data->dev_private;
- for (i = 0; i < internal->nb_queues; i++)
+ for (i = 0; i < internal->nb_queues; i++) {
internal->rx_queue[i].rx_pkts = 0;
+ internal->rx_queue[i].rx_bytes = 0;
+ }
for (i = 0; i < internal->nb_queues; i++) {
internal->tx_queue[i].tx_pkts = 0;
internal->tx_queue[i].err_pkts = 0;
+ internal->tx_queue[i].tx_bytes = 0;
}
}
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 6f1f385d..c2ddd8d7 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -14,6 +14,10 @@ EXPORT_MAP := rte_pmd_bnx2x_version.map
LIBABIVER := 1
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -wd188 #188: enumerated type mixed with another type
+endif
+
#
# all source are stored in SRCS-y
#
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 6edb2f95..3095d2bb 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -32,18 +32,20 @@
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
#define BNX2X_PMD_VERSION_MINOR 0
-#define BNX2X_PMD_VERSION_PATCH 0
+#define BNX2X_PMD_VERSION_REVISION 1
+#define BNX2X_PMD_VERSION_PATCH 1
static inline const char *
bnx2x_pmd_version(void)
{
static char version[32];
- snprintf(version, sizeof(version), "%s %s_%d.%d.%d",
+ snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d",
BNX2X_PMD_VER_PREFIX,
BNX2X_DRIVER_VERSION,
BNX2X_PMD_VERSION_MAJOR,
BNX2X_PMD_VERSION_MINOR,
+ BNX2X_PMD_VERSION_REVISION,
BNX2X_PMD_VERSION_PATCH);
return version;
@@ -1293,7 +1295,7 @@ bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue
struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)];
if (likely(tx_mbuf != NULL)) {
- rte_pktmbuf_free(tx_mbuf);
+ rte_pktmbuf_free_seg(tx_mbuf);
} else {
PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu",
fp->index, (unsigned long)TX_BD(pkt_idx, txq));
@@ -2113,147 +2115,127 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
* the mbuf and return to the caller.
*
* Returns:
- * 0 = Success, !0 = Failure
+ * int: Number of TX BDs used for the mbuf
+ *
* Note the side effect that an mbuf may be freed if it causes a problem.
*/
-int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts)
+int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
{
- struct rte_mbuf *m0;
struct eth_tx_start_bd *tx_start_bd;
uint16_t bd_prod, pkt_prod;
- int m_tx;
struct bnx2x_softc *sc;
uint32_t nbds = 0;
- struct bnx2x_fastpath *fp;
sc = txq->sc;
- fp = &sc->fp[txq->queue_id];
-
bd_prod = txq->tx_bd_tail;
pkt_prod = txq->tx_pkt_tail;
- for (m_tx = 0; m_tx < m_pkts; m_tx++) {
-
- m0 = *m_head++;
-
- if (unlikely(txq->nb_tx_avail < 3)) {
- PMD_TX_LOG(ERR, "no enough bds %d/%d",
- bd_prod, txq->nb_tx_avail);
- return -ENOMEM;
- }
+ txq->sw_ring[TX_BD(pkt_prod, txq)] = m0;
- txq->sw_ring[TX_BD(pkt_prod, txq)] = m0;
+ tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
- tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
+ tx_start_bd->addr =
+ rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
+ tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
+ tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+ tx_start_bd->general_data =
+ (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
- tx_start_bd->addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
- tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
- tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- tx_start_bd->general_data =
- (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+ tx_start_bd->nbd = rte_cpu_to_le_16(2);
- tx_start_bd->nbd = rte_cpu_to_le_16(2);
+ if (m0->ol_flags & PKT_TX_VLAN_PKT) {
+ tx_start_bd->vlan_or_ethertype =
+ rte_cpu_to_le_16(m0->vlan_tci);
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_OUTBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ } else {
+ if (IS_PF(sc))
+ tx_start_bd->vlan_or_ethertype =
+ rte_cpu_to_le_16(pkt_prod);
+ else {
+ struct ether_hdr *eh =
+ rte_pktmbuf_mtod(m0, struct ether_hdr *);
- if (m0->ol_flags & PKT_TX_VLAN_PKT) {
tx_start_bd->vlan_or_ethertype =
- rte_cpu_to_le_16(m0->vlan_tci);
- tx_start_bd->bd_flags.as_bitfield |=
- (X_ETH_OUTBAND_VLAN <<
- ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
- } else {
- if (IS_PF(sc))
- tx_start_bd->vlan_or_ethertype =
- rte_cpu_to_le_16(pkt_prod);
- else {
- struct ether_hdr *eh
- = rte_pktmbuf_mtod(m0, struct ether_hdr *);
-
- tx_start_bd->vlan_or_ethertype
- = rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type));
- }
+ rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type));
}
+ }
- bd_prod = NEXT_TX_BD(bd_prod);
- if (IS_VF(sc)) {
- struct eth_tx_parse_bd_e2 *tx_parse_bd;
- const struct ether_hdr *eh = rte_pktmbuf_mtod(m0, struct ether_hdr *);
- uint8_t mac_type = UNICAST_ADDRESS;
-
- tx_parse_bd =
- &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
- if (is_multicast_ether_addr(&eh->d_addr)) {
- if (is_broadcast_ether_addr(&eh->d_addr))
- mac_type = BROADCAST_ADDRESS;
- else
- mac_type = MULTICAST_ADDRESS;
- }
- tx_parse_bd->parsing_data =
- (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
-
- rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
- &eh->d_addr.addr_bytes[0], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
- &eh->d_addr.addr_bytes[2], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
- &eh->d_addr.addr_bytes[4], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
- &eh->s_addr.addr_bytes[0], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
- &eh->s_addr.addr_bytes[2], 2);
- rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
- &eh->s_addr.addr_bytes[4], 2);
-
- tx_parse_bd->data.mac_addr.dst_hi =
- rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi);
- tx_parse_bd->data.mac_addr.dst_mid =
- rte_cpu_to_be_16(tx_parse_bd->data.
- mac_addr.dst_mid);
- tx_parse_bd->data.mac_addr.dst_lo =
- rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo);
- tx_parse_bd->data.mac_addr.src_hi =
- rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi);
- tx_parse_bd->data.mac_addr.src_mid =
- rte_cpu_to_be_16(tx_parse_bd->data.
- mac_addr.src_mid);
- tx_parse_bd->data.mac_addr.src_lo =
- rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo);
-
- PMD_TX_LOG(DEBUG,
- "PBD dst %x %x %x src %x %x %x p_data %x",
- tx_parse_bd->data.mac_addr.dst_hi,
- tx_parse_bd->data.mac_addr.dst_mid,
- tx_parse_bd->data.mac_addr.dst_lo,
- tx_parse_bd->data.mac_addr.src_hi,
- tx_parse_bd->data.mac_addr.src_mid,
- tx_parse_bd->data.mac_addr.src_lo,
- tx_parse_bd->parsing_data);
- }
+ bd_prod = NEXT_TX_BD(bd_prod);
+ if (IS_VF(sc)) {
+ struct eth_tx_parse_bd_e2 *tx_parse_bd;
+ const struct ether_hdr *eh =
+ rte_pktmbuf_mtod(m0, struct ether_hdr *);
+ uint8_t mac_type = UNICAST_ADDRESS;
+
+ tx_parse_bd =
+ &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
+ if (is_multicast_ether_addr(&eh->d_addr)) {
+ if (is_broadcast_ether_addr(&eh->d_addr))
+ mac_type = BROADCAST_ADDRESS;
+ else
+ mac_type = MULTICAST_ADDRESS;
+ }
+ tx_parse_bd->parsing_data =
+ (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
+ &eh->d_addr.addr_bytes[0], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
+ &eh->d_addr.addr_bytes[2], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
+ &eh->d_addr.addr_bytes[4], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
+ &eh->s_addr.addr_bytes[0], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
+ &eh->s_addr.addr_bytes[2], 2);
+ rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
+ &eh->s_addr.addr_bytes[4], 2);
+
+ tx_parse_bd->data.mac_addr.dst_hi =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi);
+ tx_parse_bd->data.mac_addr.dst_mid =
+ rte_cpu_to_be_16(tx_parse_bd->data.
+ mac_addr.dst_mid);
+ tx_parse_bd->data.mac_addr.dst_lo =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo);
+ tx_parse_bd->data.mac_addr.src_hi =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi);
+ tx_parse_bd->data.mac_addr.src_mid =
+ rte_cpu_to_be_16(tx_parse_bd->data.
+ mac_addr.src_mid);
+ tx_parse_bd->data.mac_addr.src_lo =
+ rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo);
PMD_TX_LOG(DEBUG,
- "start bd: nbytes %d flags %x vlan %x\n",
- tx_start_bd->nbytes,
- tx_start_bd->bd_flags.as_bitfield,
- tx_start_bd->vlan_or_ethertype);
+ "PBD dst %x %x %x src %x %x %x p_data %x",
+ tx_parse_bd->data.mac_addr.dst_hi,
+ tx_parse_bd->data.mac_addr.dst_mid,
+ tx_parse_bd->data.mac_addr.dst_lo,
+ tx_parse_bd->data.mac_addr.src_hi,
+ tx_parse_bd->data.mac_addr.src_mid,
+ tx_parse_bd->data.mac_addr.src_lo,
+ tx_parse_bd->parsing_data);
+ }
- bd_prod = NEXT_TX_BD(bd_prod);
- pkt_prod++;
+ PMD_TX_LOG(DEBUG,
+ "start bd: nbytes %d flags %x vlan %x\n",
+ tx_start_bd->nbytes,
+ tx_start_bd->bd_flags.as_bitfield,
+ tx_start_bd->vlan_or_ethertype);
- if (TX_IDX(bd_prod) < 2) {
- nbds++;
- }
- }
+ bd_prod = NEXT_TX_BD(bd_prod);
+ pkt_prod++;
+
+ if (TX_IDX(bd_prod) < 2)
+ nbds++;
- txq->nb_tx_avail -= m_pkts << 1;
+ txq->nb_tx_avail -= 2;
txq->tx_bd_tail = bd_prod;
txq->tx_pkt_tail = pkt_prod;
- mb();
- fp->tx_db.data.prod += (m_pkts << 1) + nbds;
- DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
- mb();
-
- return 0;
+ return nbds + 2;
}
static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc)
@@ -9570,8 +9552,10 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
static void bnx2x_init_rte(struct bnx2x_softc *sc)
{
if (IS_VF(sc)) {
- sc->max_tx_queues = BNX2X_VF_MAX_QUEUES_PER_VF;
- sc->max_rx_queues = BNX2X_VF_MAX_QUEUES_PER_VF;
+ sc->max_tx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF,
+ sc->igu_sb_cnt);
+ sc->max_rx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF,
+ sc->igu_sb_cnt);
} else {
sc->max_tx_queues = 128;
sc->max_rx_queues = 128;
@@ -9713,7 +9697,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
pci_read(sc,
(sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val,
2);
- sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
+ sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
} else {
sc->igu_sb_cnt = 1;
}
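The "+ 1" corrects for the PCI MSI-X encoding: the Table Size field in Message Control stores N - 1, so a raw field value of 0 means one vector. A toy decode (mask per the PCI spec, bits 10:0):

#include <stdint.h>

static uint16_t
msix_table_size(uint16_t msg_ctrl)
{
	return (msg_ctrl & 0x07ff) + 1;	/* field stores N - 1 */
}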
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 135a6eb1..c24a5308 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -167,6 +167,8 @@ struct bnx2x_device_type {
#define TX_PAGE(x) (((x) & ~USABLE_TX_BD_PER_PAGE) >> 8)
#define TX_IDX(x) ((x) & USABLE_TX_BD_PER_PAGE)
+#define BDS_PER_TX_PKT (3)
+
/*
* Trigger pending transmits when the number of available BDs is greater
* than 1/8 of the total number of usable BDs.
@@ -1864,7 +1866,7 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc);
int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc);
void bnx2x_free_ilt_mem(struct bnx2x_softc *sc);
void bnx2x_dump_tx_chain(struct bnx2x_fastpath * fp, int bd_prod, int count);
-int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts);
+int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0);
uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp);
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 071b44fe..3ff57c42 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -276,6 +276,9 @@ static void
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct bnx2x_softc *sc = dev->data->dev_private;
+ uint32_t brb_truncate_discard;
+ uint64_t brb_drops;
+ uint64_t brb_truncates;
PMD_INIT_FUNC_TRACE();
@@ -316,6 +319,19 @@ bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->rx_nombuf =
HILO_U64(sc->eth_stats.no_buff_discard_hi,
sc->eth_stats.no_buff_discard_lo);
+
+ brb_drops =
+ HILO_U64(sc->eth_stats.brb_drop_hi,
+ sc->eth_stats.brb_drop_lo);
+
+ brb_truncates =
+ HILO_U64(sc->eth_stats.brb_truncate_hi,
+ sc->eth_stats.brb_truncate_lo);
+
+ brb_truncate_discard = sc->eth_stats.brb_truncate_discard;
+
+ stats->imissed = brb_drops + brb_truncates +
+ brb_truncate_discard + stats->rx_nombuf;
}
static void
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 752a5e81..0ec4f899 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -11,17 +11,6 @@
#include "bnx2x.h"
#include "bnx2x_rxtx.h"
-static inline struct rte_mbuf *
-bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check(m, 0);
-
- return m;
-}
-
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -148,7 +137,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Initialize software ring entries */
rxq->rx_mbuf_alloc = 0;
for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
- mbuf = bnx2x_rxmbuf_alloc(mp);
+ mbuf = rte_mbuf_raw_alloc(mp);
if (NULL == mbuf) {
PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
(unsigned)rxq->queue_id, idx);
@@ -222,40 +211,40 @@ bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct bnx2x_tx_queue *txq;
struct bnx2x_softc *sc;
struct bnx2x_fastpath *fp;
- uint32_t burst, nb_tx;
- struct rte_mbuf **m = tx_pkts;
- int ret;
+ uint16_t nb_tx_pkts;
+ uint16_t nb_pkt_sent = 0;
+ uint32_t ret;
txq = p_txq;
sc = txq->sc;
fp = &sc->fp[txq->queue_id];
- nb_tx = nb_pkts;
-
- do {
- burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);
-
- ret = bnx2x_tx_encap(txq, m, burst);
- if (unlikely(ret)) {
- PMD_TX_LOG(ERR, "tx_encap failed!");
- }
-
- bnx2x_update_fp_sb_idx(fp);
+ if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) >
+ txq->tx_free_thresh)))
+ bnx2x_txeof(sc, fp);
- if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) {
- bnx2x_txeof(sc, fp);
- }
+ nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT);
+ if (unlikely(nb_tx_pkts == 0))
+ return 0;
- if (unlikely(ret == -ENOMEM)) {
- break;
- }
+ while (nb_tx_pkts--) {
+ struct rte_mbuf *m = *tx_pkts++;
+ assert(m != NULL);
+ ret = bnx2x_tx_encap(txq, m);
+ fp->tx_db.data.prod += ret;
+ nb_pkt_sent++;
+ }
- m += burst;
- nb_pkts -= burst;
+ bnx2x_update_fp_sb_idx(fp);
+ mb();
+ DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
+ mb();
- } while (nb_pkts);
+ if ((txq->nb_tx_desc - txq->nb_tx_avail) >
+ txq->tx_free_thresh)
+ bnx2x_txeof(sc, fp);
- return nb_tx - nb_pkts;
+ return nb_pkt_sent;
}
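With BDS_PER_TX_PKT defined as 3 in bnx2x.h, the burst function now bounds the burst up front to what the free-BD budget can cover in the worst case, instead of failing mid-burst with -ENOMEM as before. A worked restatement of the admission check:

#include <stdint.h>

#define BDS_PER_TX_PKT 3

/* E.g. 100 free BDs admit at most 33 packets this burst, even though a
 * typical packet ends up consuming 2 BDs plus occasional padding BDs
 * (the nbds counted on page crossings above). */
static uint16_t
tx_admit(uint16_t nb_pkts, uint16_t nb_tx_avail)
{
	uint16_t room = nb_tx_avail / BDS_PER_TX_PKT;

	return nb_pkts < room ? nb_pkts : room;
}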
int
@@ -405,9 +394,11 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
len = cqe_fp->pkt_len_or_gro_seg_len;
pad = cqe_fp->placement_offset;
- new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
+ new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(!new_mb)) {
PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
goto next_rx;
}
@@ -427,7 +418,6 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mb->next = NULL;
rx_mb->pkt_len = rx_mb->data_len = len;
rx_mb->port = rxq->port_id;
- rx_mb->buf_len = len + pad;
rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
/*
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index b9149b89..149cc975 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -5029,7 +5029,7 @@ static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy,
if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
PMD_DRV_LOG(DEBUG, "1G parallel detect link on port %d",
params->port);
- return 1;
+ return ELINK_STATUS_ERROR;
}
CL22_RD_OVER_CL45(sc, phy,
@@ -5039,7 +5039,7 @@ static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy,
if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
PMD_DRV_LOG(DEBUG, "10G parallel detect link on port %d",
params->port);
- return 1;
+ return ELINK_STATUS_ERROR;
}
return ELINK_STATUS_OK;
}
@@ -6735,7 +6735,7 @@ static elink_status_t elink_8073_is_snr_needed(struct bnx2x_softc *sc,
if (val != 0x102)
return ELINK_STATUS_OK;
- return 1;
+ return ELINK_STATUS_ERROR;
}
static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
@@ -7557,7 +7557,7 @@ static elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
uint16_t byte_cnt,
uint8_t * o_buf)
{
- elink_status_t rc = 0;
+ elink_status_t rc = ELINK_STATUS_OK;
uint8_t xfer_size;
uint8_t *user_data = o_buf;
read_sfp_module_eeprom_func_p read_func;
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
new file mode 100644
index 00000000..d9c5a4cb
--- /dev/null
+++ b/drivers/net/bnxt/Makefile
@@ -0,0 +1,74 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2014 6WIND S.A.
+# Copyright(c) Broadcom Limited.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_bnxt.a
+
+LIBABIVER := 1
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_bnxt_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxr.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_stats.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
new file mode 100644
index 00000000..df1f7718
--- /dev/null
+++ b/drivers/net/bnxt/bnxt.h
@@ -0,0 +1,176 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_H_
+#define _BNXT_H_
+
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+
+#include "bnxt_cpr.h"
+
+#define BNXT_MAX_MTU 9000
+#define VLAN_TAG_SIZE 4
+
+enum bnxt_hw_context {
+ HW_CONTEXT_NONE = 0,
+ HW_CONTEXT_IS_RSS = 1,
+ HW_CONTEXT_IS_COS = 2,
+ HW_CONTEXT_IS_LB = 3,
+};
+
+struct bnxt_vf_info {
+ uint16_t fw_fid;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint16_t max_rsscos_ctx;
+ uint16_t max_cp_rings;
+ uint16_t max_tx_rings;
+ uint16_t max_rx_rings;
+ uint16_t max_l2_ctx;
+ uint16_t max_vnics;
+ struct bnxt_pf_info *pf;
+};
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID 1
+#define BNXT_MAX_VFS(bp) (bp->pf.max_vfs)
+#define BNXT_FIRST_VF_FID 128
+#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
+#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
+ uint32_t fw_fid;
+ uint8_t port_id;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint16_t max_rsscos_ctx;
+ uint16_t max_cp_rings;
+ uint16_t max_tx_rings;
+ uint16_t max_rx_rings;
+ uint16_t max_l2_ctx;
+ uint16_t max_vnics;
+ uint16_t first_vf_id;
+ uint16_t active_vfs;
+ uint16_t max_vfs;
+ void *vf_req_buf;
+ phys_addr_t vf_req_buf_dma_addr;
+ uint32_t vf_req_fwd[8];
+ struct bnxt_vf_info *vf;
+};
+
+/* Max wait time is 10 * 100ms = 1s */
+#define BNXT_LINK_WAIT_CNT 10
+#define BNXT_LINK_WAIT_INTERVAL 100
+struct bnxt_link_info {
+ uint8_t phy_flags;
+ uint8_t mac_type;
+ uint8_t phy_link_status;
+ uint8_t loop_back;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint8_t pause;
+ uint8_t force_pause;
+ uint8_t auto_pause;
+ uint8_t auto_mode;
+#define PHY_VER_LEN 3
+ uint8_t phy_ver[PHY_VER_LEN];
+ uint16_t link_speed;
+ uint16_t support_speeds;
+ uint16_t auto_link_speed;
+ uint16_t auto_link_speed_mask;
+ uint32_t preemphasis;
+};
+
+#define BNXT_COS_QUEUE_COUNT 8
+struct bnxt_cos_queue_info {
+ uint8_t id;
+ uint8_t profile;
+};
+
+struct bnxt {
+ void *bar0;
+
+ struct rte_eth_dev *eth_dev;
+ struct rte_pci_device *pdev;
+
+ uint32_t flags;
+#define BNXT_FLAG_REGISTERED (1 << 0)
+#define BNXT_FLAG_VF (1 << 1)
+#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+
+ unsigned int rx_nr_rings;
+ unsigned int rx_cp_nr_rings;
+ struct bnxt_rx_queue **rx_queues;
+
+ unsigned int tx_nr_rings;
+ unsigned int tx_cp_nr_rings;
+ struct bnxt_tx_queue **tx_queues;
+
+ /* Default completion ring */
+ struct bnxt_cp_ring_info *def_cp_ring;
+ uint32_t max_ring_grps;
+ struct bnxt_ring_grp_info *grp_info;
+
+ unsigned int nr_vnics;
+
+ struct bnxt_vnic_info *vnic_info;
+ STAILQ_HEAD(, bnxt_vnic_info) free_vnic_list;
+
+ struct bnxt_filter_info *filter_info;
+ STAILQ_HEAD(, bnxt_filter_info) free_filter_list;
+
+ /* VNIC pointer for flow filter (VMDq) pools */
+#define MAX_FF_POOLS ETH_64_POOLS
+ STAILQ_HEAD(, bnxt_vnic_info) ff_pool[MAX_FF_POOLS];
+
+#define MAX_NUM_MAC_ADDR 32
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+
+ uint16_t hwrm_cmd_seq;
+ void *hwrm_cmd_resp_addr;
+ phys_addr_t hwrm_cmd_resp_dma_addr;
+ rte_spinlock_t hwrm_lock;
+ uint16_t max_req_len;
+ uint16_t max_resp_len;
+
+ struct bnxt_link_info link_info;
+ struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT];
+
+ struct bnxt_pf_info pf;
+ struct bnxt_vf_info vf;
+};
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
new file mode 100644
index 00000000..60c277a1
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -0,0 +1,159 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ring.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Async event handling
+ */
+void bnxt_handle_async_event(struct bnxt *bp __rte_unused,
+ struct cmpl_base *cmp)
+{
+ struct hwrm_async_event_cmpl *async_cmp =
+ (struct hwrm_async_event_cmpl *)cmp;
+
+ /* TODO: HWRM async events are not defined yet */
+ /* Needs to handle: link events, error events, etc. */
+ switch (async_cmp->event_id) {
+ case 0:
+ /* Assume LINK_CHANGE == 0 */
+ RTE_LOG(INFO, PMD, "Link change event\n");
+
+ /* Can just prompt the update_op routine to do a qcfg
+ * instead of doing the actual qcfg
+ */
+ break;
+ case 1:
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n",
+ async_cmp->event_id);
+ break;
+ }
+}
+
+void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
+{
+ struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl;
+ struct input *fwd_cmd;
+ uint16_t logical_vf_id, error_code;
+
+ /* Qualify the fwd request */
+ if (fwd_cmpl->source_id < bp->pf.first_vf_id) {
+ RTE_LOG(ERR, PMD,
+ "FWD req's source_id 0x%x > first_vf_id 0x%x\n",
+ fwd_cmpl->source_id, bp->pf.first_vf_id);
+ error_code = HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED;
+ goto reject;
+ } else if (fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT >
+ 128 - sizeof(struct input)) {
+ RTE_LOG(ERR, PMD,
+ "FWD req's cmd len 0x%x > 108 bytes allowed\n",
+ fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT);
+ error_code = HWRM_ERR_CODE_INVALID_PARAMS;
+ goto reject;
+ }
+
+ /* Locate VF's forwarded command */
+ logical_vf_id = fwd_cmpl->source_id - bp->pf.first_vf_id;
+ fwd_cmd = (struct input *)((uint8_t *)bp->pf.vf_req_buf +
+ (logical_vf_id * 128));
+
+ /* Provision the request */
+ switch (fwd_cmd->req_type) {
+ case HWRM_CFA_L2_FILTER_ALLOC:
+ case HWRM_CFA_L2_FILTER_FREE:
+ case HWRM_CFA_L2_FILTER_CFG:
+ case HWRM_CFA_L2_SET_RX_MASK:
+ break;
+ default:
+ error_code = HWRM_ERR_CODE_INVALID_PARAMS;
+ goto reject;
+ }
+
+ /* Forward */
+ fwd_cmd->target_id = fwd_cmpl->source_id;
+ bnxt_hwrm_exec_fwd_resp(bp, fwd_cmd);
+ return;
+
+reject:
+ /* TODO: Encap the reject error resp into the hwrm_err_input? */
+ /* Use the error_code for the reject cmd */
+ RTE_LOG(ERR, PMD,
+ "Error 0x%x found in the forward request\n", error_code);
+}
+
+/* For the default completion ring only */
+void bnxt_free_def_cp_ring(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ bnxt_free_ring(cpr->cp_ring_struct);
+ rte_free(cpr->cp_ring_struct);
+ rte_free(cpr);
+}
+
+/* For the default completion ring only */
+int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id)
+{
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_ring *ring;
+
+ cpr = rte_zmalloc_socket("cpr",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+ bp->def_cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ cpr->cp_ring_struct = ring;
+ ring->bd = (void *)cpr->cp_desc_ring;
+ ring->bd_dma = cpr->cp_desc_mapping;
+ ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
new file mode 100644
index 00000000..c176f8c5
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -0,0 +1,86 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_CPR_H_
+#define _BNXT_CPR_H_
+
+#define CMP_VALID(cmp, raw_cons, ring) \
+ (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \
+ !((raw_cons) & ((ring)->ring_size)))
+
+#define CMP_TYPE(cmp) \
+ (((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
+
+#define ADV_RAW_CMP(idx, n) ((idx) + (n))
+#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
+#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask)
+#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
+
+#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+
+#define B_CP_DB_REARM(cpr, raw_cons) \
+ (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \
+ RING_CMP(cpr->cp_ring_struct, raw_cons)))
+
+#define B_CP_DIS_DB(cpr, raw_cons) \
+ (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \
+ RING_CMP(cpr->cp_ring_struct, raw_cons)))
+
+struct bnxt_ring;
+struct bnxt_cp_ring_info {
+ uint32_t cp_raw_cons;
+ void *cp_doorbell;
+
+ struct cmpl_base *cp_desc_ring;
+
+ phys_addr_t cp_desc_mapping;
+
+ struct ctx_hw_stats *hw_stats;
+ phys_addr_t hw_stats_map;
+ uint32_t hw_stats_ctx_id;
+
+ struct bnxt_ring *cp_ring_struct;
+};
+
+#define RX_CMP_L2_ERRORS \
+ (RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)
+
+
+struct bnxt;
+void bnxt_free_def_cp_ring(struct bnxt *bp);
+int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
+void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
+void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
+
+#endif
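CMP_VALID implements the usual completion-ring phase trick: the consumer keeps a raw, unmasked index, and because ring_size is a power of two, (raw_cons & ring_size) flips each time the ring wraps, matching the valid bit the hardware toggles per pass. A stand-alone restatement:

#include <stdint.h>

/* ring_size must be a power of two for the phase bit to alternate. */
static int
cmpl_is_valid(int v_bit_set, uint32_t raw_cons, uint32_t ring_size)
{
	int expected_phase = !(raw_cons & ring_size);

	return !!v_bit_set == expected_phase;
}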
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
new file mode 100644
index 00000000..406e38a6
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -0,0 +1,1049 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_stats.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+#define DRV_MODULE_NAME "bnxt"
+static const char bnxt_version[] =
+ "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+
+static struct rte_pci_id bnxt_pci_id_map[] = {
+#define RTE_PCI_DEV_ID_DECL_BNXT(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+ {.device_id = 0},
+};
+
+#define BNXT_ETH_RSS_SUPPORT ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+/***********************/
+
+/*
+ * High level utility functions
+ */
+
+static void bnxt_free_mem(struct bnxt *bp)
+{
+ bnxt_free_filter_mem(bp);
+ bnxt_free_vnic_attributes(bp);
+ bnxt_free_vnic_mem(bp);
+
+ bnxt_free_stats(bp);
+ bnxt_free_tx_rings(bp);
+ bnxt_free_rx_rings(bp);
+ bnxt_free_def_cp_ring(bp);
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp)
+{
+ int rc;
+
+ /* Default completion ring */
+ rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
+ bp->def_cp_ring, "def_cp");
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_vnic_mem(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_vnic_attributes(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ rc = bnxt_alloc_filter_mem(bp);
+ if (rc)
+ goto alloc_mem_err;
+
+ return 0;
+
+alloc_mem_err:
+ bnxt_free_mem(bp);
+ return rc;
+}
+
+static int bnxt_init_chip(struct bnxt *bp)
+{
+ unsigned int i, rss_idx, fw_idx;
+ int rc;
+
+ rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_alloc_hwrm_rings(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_alloc_all_hwrm_ring_grps(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_mq_rx_configure(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ /* VNIC configuration */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n",
+ rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "HWRM vnic ctx alloc failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n",
+ rc);
+ goto err_out;
+ }
+ if (vnic->rss_table && vnic->hash_type) {
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0;
+ rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ if (vnic->fw_grp_ids[fw_idx] ==
+ INVALID_HW_RING_ID)
+ fw_idx = 0;
+ vnic->rss_table[rss_idx] =
+ vnic->fw_grp_ids[fw_idx];
+ }
+ rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "HWRM vnic set RSS failure rc: %x\n",
+ rc);
+ goto err_out;
+ }
+ }
+ }
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ bnxt_free_all_hwrm_resources(bp);
+
+ return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp)
+{
+ bnxt_free_all_hwrm_resources(bp);
+ bnxt_free_all_filters(bp);
+ bnxt_free_all_vnics(bp);
+ return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp)
+{
+ int rc;
+
+ bnxt_init_ring_grps(bp);
+ bnxt_init_vnics(bp);
+ bnxt_init_filters(bp);
+
+ rc = bnxt_init_chip(bp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * Device configuration and status function
+ */
+
+static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint16_t max_vnics, i, j, vpool, vrxq;
+
+ /* MAC Specifics */
+ dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
+ dev_info->max_hash_mac_addrs = 0;
+
+ /* PF/VF specifics */
+ if (BNXT_PF(bp)) {
+ dev_info->max_rx_queues = bp->pf.max_rx_rings;
+ dev_info->max_tx_queues = bp->pf.max_tx_rings;
+ dev_info->max_vfs = bp->pf.active_vfs;
+ dev_info->reta_size = bp->pf.max_rsscos_ctx;
+ max_vnics = bp->pf.max_vnics;
+ } else {
+ dev_info->max_rx_queues = bp->vf.max_rx_rings;
+ dev_info->max_tx_queues = bp->vf.max_tx_rings;
+ dev_info->reta_size = bp->vf.max_rsscos_ctx;
+ max_vnics = bp->vf.max_vnics;
+ }
+
+ /* Fast path specifics */
+ dev_info->min_rx_bufsize = 1;
+ dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + VLAN_TAG_SIZE;
+ dev_info->rx_offload_capa = 0;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ /* *INDENT-OFF* */
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 0,
+ },
+ .rx_free_thresh = 32,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = 32,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_free_thresh = 32,
+ .tx_rs_thresh = 32,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ };
+ /* *INDENT-ON* */
+
+ /*
+ * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
+ * need further investigation.
+ */
+
+ /* VMDq resources */
+ vpool = 64; /* ETH_64_POOLS */
+ vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
+ for (i = 0; i < 4; vpool >>= 1, i++) {
+ if (max_vnics > vpool) {
+ for (j = 0; j < 5; vrxq >>= 1, j++) {
+ if (dev_info->max_rx_queues > vrxq) {
+ if (vpool > vrxq)
+ vpool = vrxq;
+ goto found;
+ }
+ }
+ /* Not enough resources to support VMDq */
+ break;
+ }
+ }
+ /* Not enough resources to support VMDq */
+ vpool = 0;
+ vrxq = 0;
+found:
+ dev_info->max_vmdq_pools = vpool;
+ dev_info->vmdq_queue_num = vrxq;
+
+ dev_info->vmdq_pool_base = 0;
+ dev_info->vmdq_queue_base = 0;
+}
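A stand-alone rerun of the VMDq sizing search above; for example, with max_vnics = 40 and max_rx_queues = 64 it settles on 32 pools over 32 queues:

#include <stdint.h>

static void
vmdq_size(uint16_t max_vnics, uint16_t max_rx_queues,
	  uint16_t *pools, uint16_t *queues)
{
	uint16_t vpool = 64, vrxq = 128;
	int i, j;

	for (i = 0; i < 4; vpool >>= 1, i++) {
		if (max_vnics > vpool) {
			for (j = 0; j < 5; vrxq >>= 1, j++) {
				if (max_rx_queues > vrxq) {
					if (vpool > vrxq)
						vpool = vrxq;
					*pools = vpool;
					*queues = vrxq;
					return;
				}
			}
			break;
		}
	}
	*pools = 0;
	*queues = 0;	/* not enough resources for VMDq */
}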
+
+/* Configure the device based on the configuration provided */
+static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ int rc;
+
+ bp->rx_queues = (void *)eth_dev->data->rx_queues;
+ bp->tx_queues = (void *)eth_dev->data->tx_queues;
+
+ /* Inherit new configurations */
+ bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+ bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+ bp->rx_cp_nr_rings = bp->rx_nr_rings;
+ bp->tx_cp_nr_rings = bp->tx_nr_rings;
+
+ if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+ eth_dev->data->mtu =
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+ rc = bnxt_set_hwrm_link_config(bp, true);
+ return rc;
+}
+
+static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ int rc;
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+ rc = -1;
+ goto error;
+ }
+
+ rc = bnxt_alloc_mem(bp);
+ if (rc)
+ goto error;
+
+ rc = bnxt_init_nic(bp);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ bnxt_shutdown_nic(bp);
+ bnxt_free_tx_mbufs(bp);
+ bnxt_free_rx_mbufs(bp);
+ bnxt_free_mem(bp);
+ return rc;
+}
+
+static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ eth_dev->data->dev_link.link_status = 1;
+ bnxt_set_hwrm_link_config(bp, true);
+ return 0;
+}
+
+static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ eth_dev->data->dev_link.link_status = 0;
+ bnxt_set_hwrm_link_config(bp, false);
+ return 0;
+}
+
+static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ bnxt_free_tx_mbufs(bp);
+ bnxt_free_rx_mbufs(bp);
+ bnxt_free_mem(bp);
+ rte_free(eth_dev->data->mac_addrs);
+}
+
+/* Unload the driver, release resources */
+static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ if (bp->eth_dev->data->dev_started) {
+ /* TBD: STOP HW queues DMA */
+ eth_dev->data->dev_link.link_status = 0;
+ }
+ bnxt_shutdown_nic(bp);
+}
+
+static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
+ uint32_t index)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_filter_info *filter, *temp_filter;
+ int i;
+
+ /*
+	 * Loop through all VNICs in the specified filter-flow pools and
+	 * remove the corresponding MAC address filter(s).
+ */
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ if (!(pool_mask & (1 << i)))
+ continue;
+
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+ if (filter->mac_index == index) {
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ bnxt_hwrm_clear_filter(bp, filter);
+ filter->mac_index = INVALID_MAC_INDEX;
+ memset(&filter->l2_addr, 0,
+ ETHER_ADDR_LEN);
+ STAILQ_INSERT_TAIL(
+ &bp->free_filter_list,
+ filter, next);
+ }
+ filter = temp_filter;
+ }
+ }
+ }
+}
+
+static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
+ struct bnxt_filter_info *filter;
+
+ if (!vnic) {
+ RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+ return;
+ }
+ /* Attach requested MAC address to the new l2_filter */
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ if (filter->mac_index == index) {
+ RTE_LOG(ERR, PMD,
+ "MAC addr already existed for pool %d\n", pool);
+ return;
+ }
+ }
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ return;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ filter->mac_index = index;
+ memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
+ bnxt_hwrm_set_filter(bp, vnic, filter);
+}
+
+static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
+ int wait_to_complete)
+{
+ int rc = 0;
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_link new;
+ unsigned int cnt = BNXT_LINK_WAIT_CNT;
+
+ memset(&new, 0, sizeof(new));
+ do {
+ /* Retrieve link info from hardware */
+ rc = bnxt_get_hwrm_link_config(bp, &new);
+ if (rc) {
+ new.link_speed = ETH_LINK_SPEED_100M;
+ new.link_duplex = ETH_LINK_FULL_DUPLEX;
+ RTE_LOG(ERR, PMD,
+ "Failed to retrieve link rc = 0x%x!", rc);
+ goto out;
+ }
+ if (!wait_to_complete)
+ break;
+
+ rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
+
+ } while (!new.link_status && cnt--);
+
+ /* Timed out or success */
+ if (new.link_status) {
+ /* Update only if success */
+ eth_dev->data->dev_link.link_duplex = new.link_duplex;
+ eth_dev->data->dev_link.link_speed = new.link_speed;
+ }
+ eth_dev->data->dev_link.link_status = new.link_status;
+out:
+ return rc;
+}
+
+static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags |= BNXT_VNIC_INFO_PROMISC;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
+static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags &= ~BNXT_VNIC_INFO_PROMISC;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
+static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags |= BNXT_VNIC_INFO_ALLMULTI;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
+static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+
+ vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI;
+ bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
+}
+
+static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic;
+ int i;
+
+ if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+ return -EINVAL;
+
+ if (reta_size != HW_HASH_INDEX_SIZE) {
+ RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ "(%d) must equal the size supported by the hardware "
+ "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ return -EINVAL;
+ }
+ /* Update the RSS VNIC(s) */
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
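+			/*
+			 * Note: reta_conf is an array of struct
+			 * rte_eth_rss_reta_entry64, so reta_size counts
+			 * table entries rather than bytes here (see the
+			 * matching "EW" note in bnxt_reta_query_op()).
+			 */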
+ memcpy(vnic->rss_table, reta_conf, reta_size);
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ }
+ return 0;
+}
+
+static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ /* Retrieve from the default VNIC */
+ if (!vnic)
+ return -EINVAL;
+ if (!vnic->rss_table)
+ return -EINVAL;
+
+ if (reta_size != HW_HASH_INDEX_SIZE) {
+ RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ "(%d) must equal the size supported by the hardware "
+ "(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
+ return -EINVAL;
+ }
+ /* EW - need to revisit here copying from u64 to u16 */
+ memcpy(reta_conf, vnic->rss_table, reta_size);
+
+ return 0;
+}
+
+static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_vnic_info *vnic;
+ uint16_t hash_type = 0;
+ int i;
+
+ /*
+	 * If RSS enablement here disagrees with what was requested in
+	 * dev_configure, return -EINVAL.
+ */
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (!rss_conf->rss_hf)
+ return -EINVAL;
+ } else {
+ if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
+ return -EINVAL;
+ }
+ if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+
+ /* Update the RSS VNIC(s) */
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ vnic->hash_type = hash_type;
+
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss_conf->rss_key &&
+ rss_conf->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+
+ bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ }
+ return 0;
+}
+
+static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int len;
+ uint32_t hash_types;
+
+ /* RSS configuration is the same for all VNICs */
+ if (vnic && vnic->rss_hash_key) {
+ if (rss_conf->rss_key) {
+ len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ?
+ rss_conf->rss_key_len : HW_HASH_KEY_SIZE;
+ memcpy(rss_conf->rss_key, vnic->rss_hash_key, len);
+ }
+
+ hash_types = vnic->hash_type;
+ rss_conf->rss_hf = 0;
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_IPV4;
+ hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_IPV6;
+ hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ }
+ if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) {
+ rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ hash_types &=
+ ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+ }
+ if (hash_types) {
+ RTE_LOG(ERR, PMD,
+ "Unknwon RSS config from firmware (%08x), RSS disabled",
+ vnic->hash_type);
+ return -ENOTSUP;
+ }
+ } else {
+ rss_conf->rss_hf = 0;
+ }
+ return 0;
+}
+
+static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
+			       struct rte_eth_fc_conf *fc_conf)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_link link_info;
+ int rc;
+
+ rc = bnxt_get_hwrm_link_config(bp, &link_info);
+ if (rc)
+ return rc;
+
+ memset(fc_conf, 0, sizeof(*fc_conf));
+ if (bp->link_info.auto_pause)
+ fc_conf->autoneg = 1;
+ switch (bp->link_info.pause) {
+ case 0:
+ fc_conf->mode = RTE_FC_NONE;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX |
+ HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX):
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ }
+ return 0;
+}
+
+static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+
+ switch (fc_conf->mode) {
+ case RTE_FC_NONE:
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause = 0;
+ break;
+ case RTE_FC_RX_PAUSE:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
+ }
+ break;
+ case RTE_FC_TX_PAUSE:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
+ }
+ break;
+ case RTE_FC_FULL:
+ if (fc_conf->autoneg) {
+ bp->link_info.auto_pause =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX |
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
+ bp->link_info.force_pause = 0;
+ } else {
+ bp->link_info.auto_pause = 0;
+ bp->link_info.force_pause =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX |
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
+ }
+ break;
+ }
+ return bnxt_set_hwrm_link_config(bp, true);
+}
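+
+/*
+ * Summary of the pause mapping above (autoneg on sets auto_pause,
+ * autoneg off sets force_pause):
+ *	RTE_FC_NONE     - no pause bits
+ *	RTE_FC_RX_PAUSE - RX bit only
+ *	RTE_FC_TX_PAUSE - TX bit only
+ *	RTE_FC_FULL     - both TX and RX bits
+ */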
+
+/*
+ * Initialization
+ */
+
+static struct eth_dev_ops bnxt_dev_ops = {
+ .dev_infos_get = bnxt_dev_info_get_op,
+ .dev_close = bnxt_dev_close_op,
+ .dev_configure = bnxt_dev_configure_op,
+ .dev_start = bnxt_dev_start_op,
+ .dev_stop = bnxt_dev_stop_op,
+ .dev_set_link_up = bnxt_dev_set_link_up_op,
+ .dev_set_link_down = bnxt_dev_set_link_down_op,
+ .stats_get = bnxt_stats_get_op,
+ .stats_reset = bnxt_stats_reset_op,
+ .rx_queue_setup = bnxt_rx_queue_setup_op,
+ .rx_queue_release = bnxt_rx_queue_release_op,
+ .tx_queue_setup = bnxt_tx_queue_setup_op,
+ .tx_queue_release = bnxt_tx_queue_release_op,
+ .reta_update = bnxt_reta_update_op,
+ .reta_query = bnxt_reta_query_op,
+ .rss_hash_update = bnxt_rss_hash_update_op,
+ .rss_hash_conf_get = bnxt_rss_hash_conf_get_op,
+ .link_update = bnxt_link_update_op,
+ .promiscuous_enable = bnxt_promiscuous_enable_op,
+ .promiscuous_disable = bnxt_promiscuous_disable_op,
+ .allmulticast_enable = bnxt_allmulticast_enable_op,
+ .allmulticast_disable = bnxt_allmulticast_disable_op,
+ .mac_addr_add = bnxt_mac_addr_add_op,
+ .mac_addr_remove = bnxt_mac_addr_remove_op,
+ .flow_ctrl_get = bnxt_flow_ctrl_get_op,
+ .flow_ctrl_set = bnxt_flow_ctrl_set_op,
+};
+
+static bool bnxt_vf_pciid(uint16_t id)
+{
+ if (id == BROADCOM_DEV_ID_57304_VF ||
+ id == BROADCOM_DEV_ID_57406_VF)
+ return true;
+ return false;
+}
+
+static int bnxt_init_board(struct rte_eth_dev *eth_dev)
+{
+ int rc;
+ struct bnxt *bp = eth_dev->data->dev_private;
+
+	/* Verify that the EAL has mapped PCI BAR 0 for this device */
+ if (!eth_dev->pci_dev->mem_resource[0].addr) {
+ RTE_LOG(ERR, PMD,
+ "Cannot find PCI device base address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_disable;
+ }
+
+ bp->eth_dev = eth_dev;
+ bp->pdev = eth_dev->pci_dev;
+
+ bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ if (!bp->bar0) {
+ RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
+ rc = -ENOMEM;
+ goto init_err_release;
+ }
+ return 0;
+
+init_err_release:
+ if (bp->bar0)
+ bp->bar0 = NULL;
+
+init_err_disable:
+
+ return rc;
+}
+
+static int
+bnxt_dev_init(struct rte_eth_dev *eth_dev)
+{
+ static int version_printed;
+ struct bnxt *bp;
+ int rc;
+
+ if (version_printed++ == 0)
+ RTE_LOG(INFO, PMD, "%s", bnxt_version);
+
+ if (eth_dev->pci_dev->addr.function >= 2 &&
+ eth_dev->pci_dev->addr.function < 4) {
+ RTE_LOG(ERR, PMD, "Function not enabled %x:\n",
+ eth_dev->pci_dev->addr.function);
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
+ bp = eth_dev->data->dev_private;
+
+ if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
+ bp->flags |= BNXT_FLAG_VF;
+
+ rc = bnxt_init_board(eth_dev);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Board initialization failed rc: %x\n", rc);
+ goto error;
+ }
+ eth_dev->dev_ops = &bnxt_dev_ops;
+ eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
+ eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "hwrm resource allocation failure rc: %x\n", rc);
+ goto error_free;
+ }
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ goto error_free;
+ bnxt_hwrm_queue_qportcfg(bp);
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
+ goto error_free;
+ }
+ eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
+ ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Failed to alloc %u bytes needed to store MAC addr tbl",
+ ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
+ rc = -ENOMEM;
+ goto error_free;
+ }
+ /* Copy the permanent MAC from the qcap response address now. */
+ if (BNXT_PF(bp))
+ memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
+ else
+ memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
+ memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+ bp->grp_info = rte_zmalloc("bnxt_grp_info",
+ sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
+ if (!bp->grp_info) {
+ RTE_LOG(ERR, PMD,
+ "Failed to alloc %zu bytes needed to store group info table\n",
+ sizeof(*bp->grp_info) * bp->max_ring_grps);
+ rc = -ENOMEM;
+ goto error_free;
+ }
+
+ rc = bnxt_hwrm_func_driver_register(bp, 0,
+ bp->pf.vf_req_fwd);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Failed to register driver");
+ rc = -EBUSY;
+ goto error_free;
+ }
+
+ RTE_LOG(INFO, PMD,
+ DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
+ eth_dev->pci_dev->mem_resource[0].phys_addr,
+ eth_dev->pci_dev->mem_resource[0].addr);
+
+ return 0;
+
+error_free:
+ eth_dev->driver->eth_dev_uninit(eth_dev);
+error:
+ return rc;
+}
+
+static int
+bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = eth_dev->data->dev_private;
+ int rc;
+
+ if (eth_dev->data->mac_addrs)
+ rte_free(eth_dev->data->mac_addrs);
+ if (bp->grp_info)
+ rte_free(bp->grp_info);
+ rc = bnxt_hwrm_func_driver_unregister(bp, 0);
+ bnxt_free_hwrm_resources(bp);
+ return rc;
+}
+
+static struct eth_driver bnxt_rte_pmd = {
+ .pci_drv = {
+ .name = "rte_" DRV_MODULE_NAME "_pmd",
+ .id_table = bnxt_pci_id_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .eth_dev_init = bnxt_dev_init,
+ .eth_dev_uninit = bnxt_dev_uninit,
+ .dev_private_size = sizeof(struct bnxt),
+};
+
+static int bnxt_rte_pmd_init(const char *name, const char *params __rte_unused)
+{
+ RTE_LOG(INFO, PMD, "bnxt_rte_pmd_init() called for %s\n", name);
+ rte_eth_driver_register(&bnxt_rte_pmd);
+ return 0;
+}
+
+static struct rte_driver bnxt_pmd_drv = {
+ .name = "eth_bnxt",
+ .type = PMD_PDEV,
+ .init = bnxt_rte_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(bnxt_pmd_drv);
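+
+/*
+ * Under the DPDK 16.07 conventions used here, PMD_REGISTER_DRIVER()
+ * arranges for bnxt_rte_pmd_init() to run during EAL initialization;
+ * that registers bnxt_rte_pmd, and the EAL then invokes bnxt_dev_init()
+ * once for each PCI device matching bnxt_pci_id_map.
+ */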
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
new file mode 100644
index 00000000..f03a1dc8
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -0,0 +1,175 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Filter Functions
+ */
+
+struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+
+	/* Find the 1st unused filter from the free_filter_list pool */
+ filter = STAILQ_FIRST(&bp->free_filter_list);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ return NULL;
+ }
+ STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
+
+ /* Default to L2 MAC Addr filter */
+ filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
+ memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
+ ETHER_ADDR_LEN);
+ memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ return filter;
+}
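+
+/*
+ * A caller (e.g. bnxt_mac_addr_add_op() in bnxt_ethdev.c) typically
+ * links the returned filter into a VNIC's filter list and then programs
+ * it into the hardware with bnxt_hwrm_set_filter().
+ */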
+
+void bnxt_init_filters(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+ int i, max_filters;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ max_filters = pf->max_l2_ctx;
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ max_filters = vf->max_l2_ctx;
+ }
+ STAILQ_INIT(&bp->free_filter_list);
+ for (i = 0; i < max_filters; i++) {
+ filter = &bp->filter_info[i];
+ filter->fw_l2_filter_id = -1;
+ STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
+ }
+}
+
+void bnxt_free_all_filters(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_filter_info *filter, *temp_filter;
+ int i;
+
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ filter = STAILQ_FIRST(&vnic->filter);
+ while (filter) {
+ temp_filter = STAILQ_NEXT(filter, next);
+ STAILQ_REMOVE(&vnic->filter, filter,
+ bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&bp->free_filter_list,
+ filter, next);
+ filter = temp_filter;
+ }
+ STAILQ_INIT(&vnic->filter);
+ }
+ }
+}
+
+void bnxt_free_filter_mem(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter;
+ uint16_t max_filters, i;
+ int rc = 0;
+
+ /* Ensure that all filters are freed */
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ max_filters = pf->max_l2_ctx;
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ max_filters = vf->max_l2_ctx;
+ }
+ for (i = 0; i < max_filters; i++) {
+ filter = &bp->filter_info[i];
+ if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
+ RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
+ /* Call HWRM to try to free filter again */
+ rc = bnxt_hwrm_clear_filter(bp, filter);
+ if (rc)
+ RTE_LOG(ERR, PMD,
+ "HWRM filter cannot be freed rc = %d\n",
+ rc);
+ }
+ filter->fw_l2_filter_id = -1;
+ }
+ STAILQ_INIT(&bp->free_filter_list);
+
+ rte_free(bp->filter_info);
+ bp->filter_info = NULL;
+}
+
+int bnxt_alloc_filter_mem(struct bnxt *bp)
+{
+ struct bnxt_filter_info *filter_mem;
+ uint16_t max_filters;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ max_filters = pf->max_l2_ctx;
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ max_filters = vf->max_l2_ctx;
+ }
+	/* Allocate memory for the filter pool */
+ filter_mem = rte_zmalloc("bnxt_filter_info",
+ max_filters * sizeof(struct bnxt_filter_info),
+ 0);
+ if (filter_mem == NULL) {
+ RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters",
+ max_filters);
+ return -ENOMEM;
+ }
+ bp->filter_info = filter_mem;
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
new file mode 100644
index 00000000..06fe134a
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_FILTER_H_
+#define _BNXT_FILTER_H_
+
+#include <rte_ether.h>
+
+struct bnxt;
+struct bnxt_filter_info {
+ STAILQ_ENTRY(bnxt_filter_info) next;
+ uint64_t fw_l2_filter_id;
+#define INVALID_MAC_INDEX ((uint16_t)-1)
+ uint16_t mac_index;
+
+ /* Filter Characteristics */
+ uint32_t flags;
+ uint32_t enables;
+ uint8_t l2_addr[ETHER_ADDR_LEN];
+ uint8_t l2_addr_mask[ETHER_ADDR_LEN];
+ uint16_t l2_ovlan;
+ uint16_t l2_ovlan_mask;
+ uint16_t l2_ivlan;
+ uint16_t l2_ivlan_mask;
+ uint8_t t_l2_addr[ETHER_ADDR_LEN];
+ uint8_t t_l2_addr_mask[ETHER_ADDR_LEN];
+ uint16_t t_l2_ovlan;
+ uint16_t t_l2_ovlan_mask;
+ uint16_t t_l2_ivlan;
+ uint16_t t_l2_ivlan_mask;
+ uint8_t tunnel_type;
+ uint16_t mirror_vnic_id;
+ uint32_t vni;
+ uint8_t pri_hint;
+ uint64_t l2_filter_id_hint;
+};
+
+struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp);
+void bnxt_init_filters(struct bnxt *bp);
+void bnxt_free_all_filters(struct bnxt *bp);
+void bnxt_free_filter_mem(struct bnxt *bp);
+int bnxt_alloc_filter_mem(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
new file mode 100644
index 00000000..5d81a60d
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -0,0 +1,1491 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_version.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_ring.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+#define HWRM_CMD_TIMEOUT 2000
+
+/*
+ * HWRM Functions (sent to HWRM)
+ * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
+ * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
+ * ChiMP failed the HWRM command.
+ */
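+/*
+ * A hypothetical caller can therefore distinguish the two failure modes:
+ *
+ *	rc = bnxt_hwrm_func_reset(bp);
+ *	if (rc < 0)
+ *		(transport failure, e.g. a timeout; no valid response)
+ *	else if (rc > 0)
+ *		(the ChiMP responded but rejected the command)
+ */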
+
+static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
+ uint32_t msg_len)
+{
+ unsigned int i;
+ struct input *req = msg;
+ struct output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t *data = msg;
+ uint8_t *bar;
+ uint8_t *valid;
+
+ /* Write request msg to hwrm channel */
+ for (i = 0; i < msg_len; i += 4) {
+ bar = (uint8_t *)bp->bar0 + i;
+ *(volatile uint32_t *)bar = *data;
+ data++;
+ }
+
+ /* Zero the rest of the request space */
+ for (; i < bp->max_req_len; i += 4) {
+ bar = (uint8_t *)bp->bar0 + i;
+ *(volatile uint32_t *)bar = 0;
+ }
+
+ /* Ring channel doorbell */
+ bar = (uint8_t *)bp->bar0 + 0x100;
+ *(volatile uint32_t *)bar = 1;
+
+ /* Poll for the valid bit */
+ for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+ /* Sanity check on the resp->resp_len */
+ rte_rmb();
+ if (resp->resp_len && resp->resp_len <=
+ bp->max_resp_len) {
+ /* Last byte of resp contains the valid key */
+ valid = (uint8_t *)resp + resp->resp_len - 1;
+ if (*valid == HWRM_RESP_VALID_KEY)
+ break;
+ }
+ rte_delay_us(600);
+ }
+
+ if (i >= HWRM_CMD_TIMEOUT) {
+ RTE_LOG(ERR, PMD, "Error sending msg %x\n",
+ req->req_type);
+ goto err_ret;
+ }
+ return 0;
+
+err_ret:
+ return -1;
+}
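+
+/*
+ * With HWRM_CMD_TIMEOUT == 2000 polls of 600 us each, the loop above
+ * waits roughly 2000 * 600 us = 1.2 s for the valid bit before giving
+ * up on the command.
+ */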
+
+static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
+{
+ int rc;
+
+ rte_spinlock_lock(&bp->hwrm_lock);
+ rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
+ rte_spinlock_unlock(&bp->hwrm_lock);
+ return rc;
+}
+
+#define HWRM_PREP(req, type, cr, resp) \
+ memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
+ req.req_type = rte_cpu_to_le_16(HWRM_##type); \
+ req.cmpl_ring = rte_cpu_to_le_16(cr); \
+ req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
+ req.target_id = rte_cpu_to_le_16(0xffff); \
+ req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
+
+#define HWRM_CHECK_RESULT \
+ { \
+ if (rc) { \
+ RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
+ __func__, rc); \
+ return rc; \
+ } \
+ if (resp->error_code) { \
+ rc = rte_le_to_cpu_16(resp->error_code); \
+ RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
+ return rc; \
+ } \
+ }
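+
+/*
+ * Every HWRM command in this file follows the same shape; a minimal
+ * sketch, with XXX standing in for a real command name:
+ *
+ *	struct hwrm_xxx_input req = {.req_type = 0 };
+ *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
+ *
+ *	HWRM_PREP(req, XXX, -1, resp);
+ *	(set command-specific fields in req)
+ *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ *	HWRM_CHECK_RESULT;
+ */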
+
+int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
+ struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.mask = 0;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
+ struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t mask = 0;
+
+ HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ /* FIXME add multicast flag, when multicast adding options is supported
+ * by ethtool.
+ */
+ if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
+ mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+ if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
+		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+ req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST |
+ HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
+ mask);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_clear_filter(struct bnxt *bp,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
+ struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
+
+ req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ filter->fw_l2_filter_id = -1;
+
+ return 0;
+}
+
+int bnxt_hwrm_set_filter(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_filter_info *filter)
+{
+ int rc = 0;
+ struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
+ struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t enables = 0;
+
+ HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
+
+ req.flags = rte_cpu_to_le_32(filter->flags);
+
+ enables = filter->enables |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+ req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
+ memcpy(req.l2_addr, filter->l2_addr,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
+ memcpy(req.l2_addr_mask, filter->l2_addr_mask,
+ ETHER_ADDR_LEN);
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
+ req.l2_ovlan = filter->l2_ovlan;
+ if (enables &
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
+ req.l2_ovlan_mask = filter->l2_ovlan_mask;
+
+ req.enables = rte_cpu_to_le_32(enables);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+
+ return rc;
+}
+
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
+{
+ int rc;
+ struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
+
+ memcpy(req.encap_request, fwd_cmd,
+ sizeof(req.encap_request));
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {.req_type = 0 };
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ pf->fw_fid = rte_le_to_cpu_32(resp->fid);
+ pf->port_id = resp->port_id;
+ memcpy(pf->mac_addr, resp->perm_mac_address, ETHER_ADDR_LEN);
+ pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->fw_fid = rte_le_to_cpu_32(resp->fid);
+ memcpy(vf->mac_addr, &resp->perm_mac_address, ETHER_ADDR_LEN);
+ vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ }
+
+ return rc;
+}
+
+int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_func_reset_input req = {.req_type = 0 };
+ struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, FUNC_RESET, -1, resp);
+
+ req.enables = rte_cpu_to_le_32(0);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
+ uint32_t *vf_req_fwd)
+{
+ int rc;
+ struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
+ struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (bp->flags & BNXT_FLAG_REGISTERED)
+ return 0;
+
+ HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
+ req.flags = flags;
+ req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER;
+ req.ver_maj = RTE_VER_YEAR;
+ req.ver_min = RTE_VER_MONTH;
+ req.ver_upd = RTE_VER_MINOR;
+
+ memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ bp->flags |= BNXT_FLAG_REGISTERED;
+
+ return rc;
+}
+
+int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_ver_get_input req = {.req_type = 0 };
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t my_version;
+ uint32_t fw_version;
+ uint16_t max_resp_len;
+ char type[RTE_MEMZONE_NAMESIZE];
+
+ HWRM_PREP(req, VER_GET, -1, resp);
+
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ /*
+ * Hold the lock since we may be adjusting the response pointers.
+ */
+ rte_spinlock_lock(&bp->hwrm_lock);
+ rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
+ resp->hwrm_intf_maj, resp->hwrm_intf_min,
+ resp->hwrm_intf_upd,
+ resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
+
+ my_version = HWRM_VERSION_MAJOR << 16;
+ my_version |= HWRM_VERSION_MINOR << 8;
+ my_version |= HWRM_VERSION_UPDATE;
+
+ fw_version = resp->hwrm_intf_maj << 16;
+ fw_version |= resp->hwrm_intf_min << 8;
+ fw_version |= resp->hwrm_intf_upd;
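+	/*
+	 * Both values pack major.minor.update as 0x00MMmmuu, e.g.
+	 * interface version 1.2.2 packs to 0x010202, so a plain integer
+	 * comparison orders versions correctly.
+	 */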
+
+ if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
+ RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (my_version != fw_version) {
+ RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+ if (my_version < fw_version) {
+ RTE_LOG(INFO, PMD,
+ "Firmware API version is newer than driver.\n");
+ RTE_LOG(INFO, PMD,
+ "The driver may be missing features.\n");
+ } else {
+ RTE_LOG(INFO, PMD,
+ "Firmware API version is older than driver.\n");
+ RTE_LOG(INFO, PMD,
+ "Not all driver features may be functional.\n");
+ }
+ }
+
+ if (bp->max_req_len > resp->max_req_win_len) {
+ RTE_LOG(ERR, PMD, "Unsupported request length\n");
+ rc = -EINVAL;
+ }
+ bp->max_req_len = resp->max_req_win_len;
+ max_resp_len = resp->max_resp_len;
+ if (bp->max_resp_len != max_resp_len) {
+ sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
+ bp->pdev->addr.devid, bp->pdev->addr.function);
+
+ rte_free(bp->hwrm_cmd_resp_addr);
+
+ bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
+ if (bp->hwrm_cmd_resp_addr == NULL) {
+ rc = -ENOMEM;
+ goto error;
+ }
+ bp->hwrm_cmd_resp_dma_addr =
+ rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+ bp->max_resp_len = max_resp_len;
+ }
+
+error:
+ rte_spinlock_unlock(&bp->hwrm_lock);
+ return rc;
+}
+
+int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
+{
+ int rc;
+ struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
+ struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (!(bp->flags & BNXT_FLAG_REGISTERED))
+ return 0;
+
+ HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
+ req.flags = flags;
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ bp->flags &= ~BNXT_FLAG_REGISTERED;
+
+ return rc;
+}
+
+static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
+{
+ int rc = 0;
+ struct hwrm_port_phy_cfg_input req = {.req_type = 0};
+ struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
+
+ req.flags = conf->phy_flags;
+ if (conf->link_up) {
+ req.force_link_speed = conf->link_speed;
+ /*
+ * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
+ * any auto mode, even "none".
+ */
+ if (req.auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) {
+ req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
+ } else {
+ req.auto_mode = conf->auto_mode;
+ req.enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+ req.auto_link_speed_mask = conf->auto_link_speed_mask;
+ req.enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
+ req.auto_link_speed = conf->auto_link_speed;
+ req.enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
+ }
+ req.auto_duplex = conf->duplex;
+ req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
+ req.auto_pause = conf->auto_pause;
+ /* Set force_pause if there is no auto or if there is a force */
+ if (req.auto_pause)
+ req.enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
+ else
+ req.enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
+ req.force_pause = conf->force_pause;
+ if (req.force_pause)
+ req.enables |=
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
+ } else {
+ req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
+ req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN;
+ req.force_link_speed = 0;
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
+ struct bnxt_link_info *link_info)
+{
+ int rc = 0;
+ struct hwrm_port_phy_qcfg_input req = {.req_type = 0};
+ struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ link_info->phy_link_status = resp->link;
+ if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
+ link_info->link_up = 1;
+ link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
+ } else {
+ link_info->link_up = 0;
+ link_info->link_speed = 0;
+ }
+ link_info->duplex = resp->duplex;
+ link_info->pause = resp->pause;
+ link_info->auto_pause = resp->auto_pause;
+ link_info->force_pause = resp->force_pause;
+ link_info->auto_mode = resp->auto_mode;
+
+ link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
+ link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
+ link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+ link_info->phy_ver[0] = resp->phy_maj;
+ link_info->phy_ver[1] = resp->phy_min;
+ link_info->phy_ver[2] = resp->phy_bld;
+
+ return rc;
+}
+
+int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
+ struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+#define GET_QUEUE_INFO(x) \
+ bp->cos_queue[x].id = resp->queue_id##x; \
+ bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
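+/*
+ * Via token pasting, GET_QUEUE_INFO(0) expands to:
+ *	bp->cos_queue[0].id = resp->queue_id0;
+ *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
+ */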
+
+ GET_QUEUE_INFO(0);
+ GET_QUEUE_INFO(1);
+ GET_QUEUE_INFO(2);
+ GET_QUEUE_INFO(3);
+ GET_QUEUE_INFO(4);
+ GET_QUEUE_INFO(5);
+ GET_QUEUE_INFO(6);
+ GET_QUEUE_INFO(7);
+
+ return rc;
+}
+
+int bnxt_hwrm_ring_alloc(struct bnxt *bp,
+ struct bnxt_ring *ring,
+ uint32_t ring_type, uint32_t map_index,
+ uint32_t stats_ctx_id)
+{
+ int rc = 0;
+ struct hwrm_ring_alloc_input req = {.req_type = 0 };
+ struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, RING_ALLOC, -1, resp);
+
+ req.enables = rte_cpu_to_le_32(0);
+
+ req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
+ req.fbo = rte_cpu_to_le_32(0);
+ /* Association of ring index with doorbell index */
+ req.logical_id = rte_cpu_to_le_16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+		req.queue_id = bp->cos_queue[0].id;
+		/* FALLTHROUGH: TX rings also need the shared setup below */
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
+ req.ring_type = ring_type;
+ req.cmpl_ring_id =
+ rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
+ req.length = rte_cpu_to_le_32(ring->ring_size);
+ req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
+ req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+ break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
+ req.ring_type = ring_type;
+ /*
+ * TODO: Some HWRM versions crash with
+ * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
+ */
+ req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+ req.length = rte_cpu_to_le_32(ring->ring_size);
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
+ ring_type);
+ return -1;
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc || resp->error_code) {
+ if (rc == 0 && resp->error_code)
+ rc = rte_le_to_cpu_16(resp->error_code);
+ switch (ring_type) {
+ case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
+ RTE_LOG(ERR, PMD,
+ "hwrm_ring_alloc cp failed. rc:%d\n", rc);
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+ RTE_LOG(ERR, PMD,
+ "hwrm_ring_alloc rx failed. rc:%d\n", rc);
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+ RTE_LOG(ERR, PMD,
+ "hwrm_ring_alloc tx failed. rc:%d\n", rc);
+ return rc;
+ default:
+ RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+ return rc;
+ }
+ }
+
+ ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
+ return rc;
+}
+
+int bnxt_hwrm_ring_free(struct bnxt *bp,
+ struct bnxt_ring *ring, uint32_t ring_type)
+{
+ int rc;
+ struct hwrm_ring_free_input req = {.req_type = 0 };
+ struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, RING_FREE, -1, resp);
+
+ req.ring_type = ring_type;
+ req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc || resp->error_code) {
+ if (rc == 0 && resp->error_code)
+ rc = rte_le_to_cpu_16(resp->error_code);
+
+ switch (ring_type) {
+ case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
+ RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
+ rc);
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
+ RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
+ rc);
+ return rc;
+ case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
+ RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
+ rc);
+ return rc;
+ default:
+ RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
+{
+ int rc = 0;
+ struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
+ struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
+
+ req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
+ req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
+ req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
+ req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ bp->grp_info[idx].fw_grp_id =
+ rte_le_to_cpu_16(resp->ring_group_id);
+
+ return rc;
+}
+
+int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
+{
+ int rc;
+ struct hwrm_ring_grp_free_input req = {.req_type = 0 };
+ struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, RING_GRP_FREE, -1, resp);
+
+ req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
+ return rc;
+}
+
+int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ int rc = 0;
+ struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
+ struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
+
+ if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
+ return rc;
+
+ req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+ req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr, unsigned int idx)
+{
+ int rc;
+ struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
+ struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
+
+ req.update_period_ms = rte_cpu_to_le_32(1000);
+
+ req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
+ req.stats_dma_addr =
+ rte_cpu_to_le_64(cpr->hw_stats_map);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
+ bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+ return rc;
+}
+
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr, unsigned int idx)
+{
+ int rc;
+ struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
+ struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
+
+ req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
+ req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0, i, j;
+ struct hwrm_vnic_alloc_input req = {.req_type = 0 };
+ struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+ /* map ring groups to this vnic */
+ for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
+ if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
+ RTE_LOG(ERR, PMD,
+ "Not enough ring groups avail:%x req:%x\n", j,
+ (vnic->end_grp_id - vnic->start_grp_id) + 1);
+ break;
+ }
+ vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+ }
+
+ vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
+
+ HWRM_PREP(req, VNIC_ALLOC, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+ return rc;
+}
+
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_CFG, -1, resp);
+
+ /* Only RSS support for now TBD: COS & LB */
+ req.enables =
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
+ HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
+ HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+ req.dflt_ring_grp =
+ rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
+ req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+ req.cos_rule = rte_cpu_to_le_16(0xffff);
+ req.lb_rule = rte_cpu_to_le_16(0xffff);
+ req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ if (vnic->func_default)
+ req.flags = 1;
+ if (vnic->vlan_strip)
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
+ bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
+
+ req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+
+ return rc;
+}
+
+int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_free_input req = {.req_type = 0 };
+ struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return rc;
+
+ HWRM_PREP(req, VNIC_FREE, -1, resp);
+
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ return rc;
+}
+
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int rc = 0;
+ struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
+
+ req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+
+ req.ring_grp_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_table_dma_addr);
+ req.hash_key_tbl_addr =
+ rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
+ req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
+/*
+ * HWRM utility functions
+ */
+
+int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+
+ if (i >= bp->rx_cp_nr_rings) {
+ txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
+ cpr = txq->cp_ring;
+ } else {
+ rxq = bp->rx_queues[i];
+ cpr = rxq->cp_ring;
+ }
+
+ rc = bnxt_hwrm_stat_clear(bp, cpr);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
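+
+/*
+ * These utility loops walk RX and TX completion rings with one index:
+ * i in [0, rx_cp_nr_rings) selects rx_queues[i], larger values select
+ * tx_queues[i - rx_cp_nr_rings]. Group-info slot 0 is reserved for the
+ * default completion ring, hence the idx = i + 1 in the functions below.
+ */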
+
+int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ int rc;
+ unsigned int i;
+ struct bnxt_cp_ring_info *cpr;
+
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+ unsigned int idx = i + 1;
+
+ if (i >= bp->rx_cp_nr_rings)
+ cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
+ else
+ cpr = bp->rx_queues[i]->cp_ring;
+ if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
+ rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
+ if (rc)
+ return rc;
+ }
+ }
+ return 0;
+}
+
+int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq;
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ unsigned int idx = i + 1;
+
+ if (i >= bp->rx_cp_nr_rings) {
+ txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
+ cpr = txq->cp_ring;
+ } else {
+ rxq = bp->rx_queues[i];
+ cpr = rxq->cp_ring;
+ }
+
+ rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
+
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+{
+ uint16_t i;
+ uint32_t rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ unsigned int idx = i + 1;
+
+ if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
+ RTE_LOG(ERR, PMD,
+ "Attempt to free invalid ring group %d\n",
+ idx);
+ continue;
+ }
+
+ rc = bnxt_hwrm_ring_grp_free(bp, idx);
+
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+static void bnxt_free_cp_ring(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr, unsigned int idx)
+{
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+ bnxt_hwrm_ring_free(bp, cp_ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
+ cp_ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
+ memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
+ sizeof(*cpr->cp_desc_ring));
+ cpr->cp_raw_cons = 0;
+}
+
+int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ unsigned int idx = bp->rx_cp_nr_rings + i + 1;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_TX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(txr->tx_desc_ring, 0,
+ txr->tx_ring_struct->ring_size *
+ sizeof(*txr->tx_desc_ring));
+ memset(txr->tx_buf_ring, 0,
+ txr->tx_ring_struct->ring_size *
+ sizeof(*txr->tx_buf_ring));
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr, idx);
+ }
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ unsigned int idx = i + 1;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->rx_desc_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_desc_ring));
+ memset(rxr->rx_buf_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_buf_ring));
+ rxr->rx_prod = 0;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr, idx);
+ }
+
+ /* Default completion ring */
+ {
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr, 0);
+ }
+
+ return rc;
+}
+
+int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
+{
+ uint16_t i;
+ int rc = 0;
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ unsigned int idx = i + 1;
+
+ if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
+ bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
+ continue;
+
+ rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
+
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+ /* Release the rte_malloc'd HWRM response buffer */
+ rte_free(bp->hwrm_cmd_resp_addr);
+ bp->hwrm_cmd_resp_addr = NULL;
+ bp->hwrm_cmd_resp_dma_addr = 0;
+}
+
+int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+ struct rte_pci_device *pdev = bp->pdev;
+ char type[RTE_MEMZONE_NAMESIZE];
+
+ sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+ bp->max_req_len = HWRM_MAX_REQ_LEN;
+ bp->max_resp_len = HWRM_MAX_RESP_LEN;
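+ /* Firmware DMAs command responses into this buffer; its physical address is captured below. */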
+ bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
+ if (bp->hwrm_cmd_resp_addr == NULL)
+ return -ENOMEM;
+ bp->hwrm_cmd_resp_dma_addr =
+ rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_spinlock_init(&bp->hwrm_lock);
+
+ return 0;
+}
+
+int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter;
+ int rc = 0;
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ rc = bnxt_hwrm_clear_filter(bp, filter);
+ if (rc)
+ break;
+ }
+ return rc;
+}
+
+int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter;
+ int rc = 0;
+
+ STAILQ_FOREACH(filter, &vnic->filter, next) {
+ rc = bnxt_hwrm_set_filter(bp, vnic, filter);
+ if (rc)
+ break;
+ }
+ return rc;
+}
+
+void bnxt_free_all_hwrm_resources(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ unsigned int i;
+
+ if (bp->vnic_info == NULL)
+ return;
+
+ vnic = &bp->vnic_info[0];
+ bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
+
+ /* VNIC resources */
+ for (i = 0; i < bp->nr_vnics; i++) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ bnxt_clear_hwrm_vnic_filters(bp, vnic);
+
+ bnxt_hwrm_vnic_ctx_free(bp, vnic);
+ bnxt_hwrm_vnic_free(bp, vnic);
+ }
+ /* Ring resources */
+ bnxt_free_all_hwrm_rings(bp);
+ bnxt_free_all_hwrm_ring_grps(bp);
+ bnxt_free_all_hwrm_stat_ctxs(bp);
+}
+
+static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
+{
+ uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
+
+ if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
+
+ switch (conf_link_speed) {
+ case ETH_LINK_SPEED_10M_HD:
+ case ETH_LINK_SPEED_100M_HD:
+ return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
+ }
+ return hw_link_duplex;
+}
+
+static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
+{
+ uint16_t eth_link_speed = 0;
+
+ if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+ return ETH_LINK_SPEED_AUTONEG;
+
+ switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
+ case ETH_LINK_SPEED_100M:
+ case ETH_LINK_SPEED_100M_HD:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
+ break;
+ case ETH_LINK_SPEED_1G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
+ break;
+ case ETH_LINK_SPEED_2_5G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
+ break;
+ case ETH_LINK_SPEED_10G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
+ break;
+ case ETH_LINK_SPEED_20G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
+ break;
+ case ETH_LINK_SPEED_25G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
+ break;
+ case ETH_LINK_SPEED_40G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
+ break;
+ case ETH_LINK_SPEED_50G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+ break;
+ default:
+ RTE_LOG(ERR, PMD,
+ "Unsupported link speed %d; default to AUTO\n",
+ conf_link_speed);
+ break;
+ }
+ return eth_link_speed;
+}
+
+#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
+ ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
+ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
+
+static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
+{
+ uint32_t one_speed;
+
+ if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ return 0;
+
+ if (link_speed & ETH_LINK_SPEED_FIXED) {
+ one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
+
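+ /*
+ * x & (x - 1) clears the lowest set bit, so the result is non-zero
+ * whenever more than one fixed speed bit is advertised.
+ */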
+ if (one_speed & (one_speed - 1)) {
+ RTE_LOG(ERR, PMD,
+ "Invalid advertised speeds (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported advertised speed (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ } else {
+ if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
+ RTE_LOG(ERR, PMD,
+ "Unsupported advertised speeds (%u) for port %u\n",
+ link_speed, port_id);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
+{
+ uint16_t ret = 0;
+
+ if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ link_speed = BNXT_SUPPORTED_SPEEDS;
+
+ if (link_speed & ETH_LINK_SPEED_100M)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
+ if (link_speed & ETH_LINK_SPEED_100M_HD)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
+ if (link_speed & ETH_LINK_SPEED_1G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
+ if (link_speed & ETH_LINK_SPEED_2_5G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
+ if (link_speed & ETH_LINK_SPEED_10G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
+ if (link_speed & ETH_LINK_SPEED_20G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
+ if (link_speed & ETH_LINK_SPEED_25G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
+ if (link_speed & ETH_LINK_SPEED_40G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
+ if (link_speed & ETH_LINK_SPEED_50G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+ return ret;
+}
+
+static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
+{
+ uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
+
+ switch (hw_link_speed) {
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
+ eth_link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
+ eth_link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
+ eth_link_speed = ETH_SPEED_NUM_2_5G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
+ eth_link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
+ eth_link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
+ eth_link_speed = ETH_SPEED_NUM_25G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
+ eth_link_speed = ETH_SPEED_NUM_40G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
+ eth_link_speed = ETH_SPEED_NUM_50G;
+ break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
+ default:
+ RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+ hw_link_speed);
+ break;
+ }
+ return eth_link_speed;
+}
+
+static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
+{
+ uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ switch (hw_link_duplex) {
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
+ eth_link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
+ eth_link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+ hw_link_duplex);
+ break;
+ }
+ return eth_link_duplex;
+}
+
+int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
+{
+ int rc = 0;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Get link config failed with rc %d\n", rc);
+ goto exit;
+ }
+ if (link_info->link_up)
+ link->link_speed =
+ bnxt_parse_hw_link_speed(link_info->link_speed);
+ else
+ link->link_speed = ETH_LINK_SPEED_10M;
+ link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
+ link->link_status = link_info->link_up;
+ link->link_autoneg = link_info->auto_mode ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
+ ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
+exit:
+ return rc;
+}
+
+int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
+{
+ int rc = 0;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_link_info link_req;
+ uint16_t speed;
+
+ rc = bnxt_valid_link_speed(dev_conf->link_speeds,
+ bp->eth_dev->data->port_id);
+ if (rc)
+ goto error;
+
+ memset(&link_req, 0, sizeof(link_req));
+ speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
+ link_req.link_up = link_up;
+ if (speed == 0) {
+ link_req.phy_flags =
+ HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
+ link_req.auto_mode =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW;
+ link_req.auto_link_speed_mask =
+ bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
+ link_req.auto_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
+ } else {
+ link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+ link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
+ HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
+ link_req.link_speed = speed;
+ }
+ link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
+ link_req.auto_pause = bp->link_info.auto_pause;
+ link_req.force_pause = bp->link_info.force_pause;
+
+ rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "Set link config failed with rc %d\n", rc);
+ }
+
+error:
+ return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
new file mode 100644
index 00000000..a508024f
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -0,0 +1,104 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_HWRM_H_
+#define _BNXT_HWRM_H_
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+struct bnxt;
+struct bnxt_filter_info;
+struct bnxt_cp_ring_info;
+
+#define HWRM_SEQ_ID_INVALID -1U
+
+int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_clear_filter(struct bnxt *bp,
+ struct bnxt_filter_info *filter);
+int bnxt_hwrm_set_filter(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ struct bnxt_filter_info *filter);
+
+int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd);
+
+int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
+ uint32_t *vf_req_fwd);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_reset(struct bnxt *bp);
+int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
+
+int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+
+int bnxt_hwrm_ring_alloc(struct bnxt *bp,
+ struct bnxt_ring *ring,
+ uint32_t ring_type, uint32_t map_index,
+ uint32_t stats_ctx_id);
+int bnxt_hwrm_ring_free(struct bnxt *bp,
+ struct bnxt_ring *ring, uint32_t ring_type);
+int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
+int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx);
+
+int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr, unsigned int idx);
+int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr, unsigned int idx);
+
+int bnxt_hwrm_ver_get(struct bnxt *bp);
+
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
+
+int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp);
+int bnxt_free_all_hwrm_rings(struct bnxt *bp);
+int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp);
+int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp);
+int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_free_all_hwrm_resources(struct bnxt *bp);
+void bnxt_free_hwrm_resources(struct bnxt *bp);
+int bnxt_alloc_hwrm_resources(struct bnxt *bp);
+int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
+int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
new file mode 100644
index 00000000..3f81ffcc
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -0,0 +1,306 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_memzone.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Generic ring handling
+ */
+
+void bnxt_free_ring(struct bnxt_ring *ring)
+{
+ if (ring->vmem_size && *ring->vmem) {
+ memset((char *)*ring->vmem, 0, ring->vmem_size);
+ *ring->vmem = NULL;
+ }
+ rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
+}
+
+/*
+ * Ring groups
+ */
+
+void bnxt_init_ring_grps(struct bnxt *bp)
+{
+ unsigned int i;
+
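+ /*
+ * Byte-filling with the low byte of HWRM_NA_SIGNATURE (0xff) sets
+ * every 16-bit id in grp_info to 0xffff, i.e. INVALID_HW_RING_ID.
+ */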
+ for (i = 0; i < bp->max_ring_grps; i++)
+ memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
+ sizeof(struct bnxt_ring_grp_info));
+}
+
+/*
+ * Allocates a completion ring with vmem and stats, optionally also
+ * allocating a TX and/or RX ring.  Pass NULL as tx_ring_info and/or
+ * rx_ring_info to skip allocating them.
+ *
+ * Order in the allocation is:
+ * stats - Always non-zero length
+ * cp vmem - Always zero-length, supported for the bnxt_ring abstraction
+ * tx vmem - Only non-zero length if tx_ring_info is not NULL
+ * rx vmem - Only non-zero length if rx_ring_info is not NULL
+ * cp bd ring - Always non-zero length
+ * tx bd ring - Only non-zero length if tx_ring_info is not NULL
+ * rx bd ring - Only non-zero length if rx_ring_info is not NULL
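+ *
+ * Each region thus begins where the previous one ends, rounded up to a
+ * cache line: start(n) = start(n-1) + RTE_CACHE_LINE_ROUNDUP(len(n-1)).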
+ */
+int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+ struct bnxt_tx_ring_info *tx_ring_info,
+ struct bnxt_rx_ring_info *rx_ring_info,
+ struct bnxt_cp_ring_info *cp_ring_info,
+ const char *suffix)
+{
+ struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
+ struct bnxt_ring *tx_ring;
+ struct bnxt_ring *rx_ring;
+ struct rte_pci_device *pdev = bp->pdev;
+ const struct rte_memzone *mz = NULL;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+
+ int stats_len = (tx_ring_info || rx_ring_info) ?
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;
+
+ int cp_vmem_start = stats_len;
+ int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
+
+ int tx_vmem_start = cp_vmem_start + cp_vmem_len;
+ int tx_vmem_len =
+ tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->vmem_size) : 0;
+
+ int rx_vmem_start = tx_vmem_start + tx_vmem_len;
+ int rx_vmem_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->vmem_size) : 0;
+
+ int cp_ring_start = rx_vmem_start + rx_vmem_len;
+ int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size *
+ sizeof(struct cmpl_base));
+
+ int tx_ring_start = cp_ring_start + cp_ring_len;
+ int tx_ring_len = tx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size *
+ sizeof(struct tx_bd_long)) : 0;
+
+ int rx_ring_start = tx_ring_start + tx_ring_len;
+ int rx_ring_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
+ sizeof(struct rx_prod_pkt_bd)) : 0;
+
+ int total_alloc_len = rx_ring_start + rx_ring_len;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx,
+ suffix);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
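+ /* Re-use an existing memzone of the same name, if any, before reserving a new one. */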
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name, total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (mz == NULL)
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+
+ if (tx_ring_info) {
+ tx_ring = tx_ring_info->tx_ring_struct;
+
+ tx_ring->bd = ((char *)mz->addr + tx_ring_start);
+ tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd;
+ tx_ring->bd_dma = mz->phys_addr + tx_ring_start;
+ tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
+ tx_ring->mem_zone = (const void *)mz;
+
+ if (!tx_ring->bd)
+ return -ENOMEM;
+ if (tx_ring->vmem_size) {
+ tx_ring->vmem =
+ (void **)((char *)mz->addr + tx_vmem_start);
+ tx_ring_info->tx_buf_ring =
+ (struct bnxt_sw_tx_bd *)tx_ring->vmem;
+ }
+ }
+
+ if (rx_ring_info) {
+ rx_ring = rx_ring_info->rx_ring_struct;
+
+ rx_ring->bd = ((char *)mz->addr + rx_ring_start);
+ rx_ring_info->rx_desc_ring =
+ (struct rx_prod_pkt_bd *)rx_ring->bd;
+ rx_ring->bd_dma = mz->phys_addr + rx_ring_start;
+ rx_ring_info->rx_desc_mapping = rx_ring->bd_dma;
+ rx_ring->mem_zone = (const void *)mz;
+
+ if (!rx_ring->bd)
+ return -ENOMEM;
+ if (rx_ring->vmem_size) {
+ rx_ring->vmem =
+ (void **)((char *)mz->addr + rx_vmem_start);
+ rx_ring_info->rx_buf_ring =
+ (struct bnxt_sw_rx_bd *)rx_ring->vmem;
+ }
+ }
+
+ cp_ring->bd = ((char *)mz->addr + cp_ring_start);
+ cp_ring->bd_dma = mz->phys_addr + cp_ring_start;
+ cp_ring_info->cp_desc_ring = cp_ring->bd;
+ cp_ring_info->cp_desc_mapping = cp_ring->bd_dma;
+ cp_ring->mem_zone = (const void *)mz;
+
+ if (!cp_ring->bd)
+ return -ENOMEM;
+ if (cp_ring->vmem_size)
+ *cp_ring->vmem = ((char *)mz->addr + stats_len);
+ if (stats_len) {
+ cp_ring_info->hw_stats = mz->addr;
+ cp_ring_info->hw_stats_map = mz->phys_addr;
+ }
+ cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
+ return 0;
+}
+
+/* ring_grp usage:
+ * [0] = default completion ring
+ * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
+ * [1+rx_cp_nr_rings -> rx_cp_nr_rings+tx_cp_nr_rings] = tx_cp, tx rings
+ */
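+/*
+ * For example, with 2 RX and 2 TX rings: grp_info[0] is the default
+ * completion ring, grp_info[1..2] the RX rings and grp_info[3..4] the
+ * TX rings.
+ */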
+int bnxt_alloc_hwrm_rings(struct bnxt *bp)
+{
+ unsigned int i;
+ int rc = 0;
+
+ /* Default completion ring */
+ {
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+ 0, HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
+ }
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ unsigned int idx = i + 1;
+
+ /* Rx cmpl */
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+ idx, HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+ cpr->cp_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ idx * 0x80;
+ bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ /* Rx ring */
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ idx, cpr->hw_stats_ctx_id);
+ if (rc)
+ goto err_out;
+ rxr->rx_prod = 0;
+ rxr->rx_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ idx * 0x80;
+ bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ if (bnxt_init_one_rx_ring(rxq)) {
+ RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!");
+ bnxt_rx_queue_release_op(rxq);
+ return -ENOMEM;
+ }
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ }
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+ unsigned int idx = 1 + bp->rx_cp_nr_rings + i;
+
+ /* Tx cmpl */
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
+ idx, HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+
+ cpr->cp_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ idx * 0x80;
+ bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ /* Tx ring */
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
+ idx, cpr->hw_stats_ctx_id);
+ if (rc)
+ goto err_out;
+
+ txr->tx_doorbell =
+ (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ idx * 0x80;
+ }
+
+err_out:
+ return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
new file mode 100644
index 00000000..8656549a
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -0,0 +1,103 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_RING_H_
+#define _BNXT_RING_H_
+
+#include <inttypes.h>
+
+#include <rte_memory.h>
+
+#define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask)
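+/* e.g. with ring_size 256, ring_mask is 255 and RING_NEXT(ring, 255) wraps to 0 */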
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+ ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
+
+#define DB_IDX_MASK 0xffffff
+#define DB_IDX_VALID (0x1 << 26)
+#define DB_IRQ_DIS (0x1 << 27)
+#define DB_KEY_TX (0x0 << 28)
+#define DB_KEY_RX (0x1 << 28)
+#define DB_KEY_CP (0x2 << 28)
+#define DB_KEY_ST (0x3 << 28)
+#define DB_KEY_TX_PUSH (0x4 << 28)
+#define DB_LONG_TX_PUSH (0x2 << 24)
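+
+/*
+ * A doorbell write is a single 32-bit word: the ring index in bits 0-23
+ * with one of the DB_KEY_* values (plus optional flag bits) or'd in
+ * above it, e.g. (DB_KEY_RX | prod) for an RX producer update.
+ */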
+
+#define DEFAULT_CP_RING_SIZE 256
+#define DEFAULT_RX_RING_SIZE 256
+#define DEFAULT_TX_RING_SIZE 256
+
+#define MAX_TPA 128
+
+/* These assume 4k pages */
+#define MAX_RX_DESC_CNT (8 * 1024)
+#define MAX_TX_DESC_CNT (4 * 1024)
+#define MAX_CP_DESC_CNT (16 * 1024)
+
+#define INVALID_HW_RING_ID ((uint16_t)-1)
+
+struct bnxt_ring {
+ void *bd;
+ phys_addr_t bd_dma;
+ uint32_t ring_size;
+ uint32_t ring_mask;
+
+ int vmem_size;
+ void **vmem;
+
+ uint16_t fw_ring_id; /* Ring id filled by Chimp FW */
+ const void *mem_zone;
+};
+
+struct bnxt_ring_grp_info {
+ uint16_t fw_stats_ctx;
+ uint16_t fw_grp_id;
+ uint16_t rx_fw_ring_id;
+ uint16_t cp_fw_ring_id;
+ uint16_t ag_fw_ring_id;
+};
+
+struct bnxt;
+struct bnxt_tx_ring_info;
+struct bnxt_rx_ring_info;
+struct bnxt_cp_ring_info;
+void bnxt_free_ring(struct bnxt_ring *ring);
+void bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
+ struct bnxt_tx_ring_info *tx_ring_info,
+ struct bnxt_rx_ring_info *rx_ring_info,
+ struct bnxt_cp_ring_info *cp_ring_info,
+ const char *suffix);
+int bnxt_alloc_hwrm_rings(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
new file mode 100644
index 00000000..cddf17d5
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -0,0 +1,319 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxq.h"
+#include "bnxt_rxr.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * RX Queues
+ */
+
+void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+
+ cpr->hw_stats = NULL;
+}
+
+int bnxt_mq_rx_configure(struct bnxt *bp)
+{
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ unsigned int i, j, nb_q_per_grp, ring_idx;
+ int start_grp_id, end_grp_id, rc = 0;
+ struct bnxt_vnic_info *vnic;
+ struct bnxt_filter_info *filter;
+ struct bnxt_rx_queue *rxq;
+
+ bp->nr_vnics = 0;
+
+ /* Single queue mode */
+ if (bp->rx_cp_nr_rings < 2) {
+ vnic = bnxt_alloc_vnic(bp);
+ if (!vnic) {
+ RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
+ bp->nr_vnics++;
+
+ rxq = bp->eth_dev->data->rx_queues[0];
+ rxq->vnic = vnic;
+
+ vnic->func_default = true;
+ vnic->ff_pool_idx = 0;
+ vnic->start_grp_id = 1;
+ vnic->end_grp_id = vnic->start_grp_id +
+ bp->rx_cp_nr_rings - 1;
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+ goto out;
+ }
+
+ /* Multi-queue mode */
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
+ enum rte_eth_nb_pools pools;
+
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_RSS:
+ case ETH_MQ_RX_VMDQ_ONLY:
+ {
+ const struct rte_eth_vmdq_rx_conf *conf =
+ &dev_conf->rx_adv_conf.vmdq_rx_conf;
+
+ /* ETH_8/64_POOLs */
+ pools = conf->nb_queue_pools;
+ break;
+ }
+ default:
+ RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
+ dev_conf->rxmode.mq_mode);
+ rc = -EINVAL;
+ goto err_out;
+ }
+ /* For each pool, allocate MACVLAN CFA rule & VNIC */
+ if (!pools) {
+ RTE_LOG(ERR, PMD,
+ "VMDq pool not set, defaulted to 64\n");
+ pools = ETH_64_POOLS;
+ }
+ nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+ start_grp_id = 1;
+ end_grp_id = start_grp_id + nb_q_per_grp - 1;
+
+ ring_idx = 0;
+ for (i = 0; i < pools; i++) {
+ vnic = bnxt_alloc_vnic(bp);
+ if (!vnic) {
+ RTE_LOG(ERR, PMD,
+ "VNIC alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
+ bp->nr_vnics++;
+
+ for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
+ rxq = bp->eth_dev->data->rx_queues[ring_idx];
+ rxq->vnic = vnic;
+ }
+ if (i == 0)
+ vnic->func_default = true;
+ vnic->ff_pool_idx = i;
+ vnic->start_grp_id = start_grp_id;
+ vnic->end_grp_id = end_grp_id;
+
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ RTE_LOG(ERR, PMD,
+ "L2 filter alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ /*
+ * TODO: Configure & associate CFA rule for
+ * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
+ */
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+
+ start_grp_id = end_grp_id + 1;
+ end_grp_id += nb_q_per_grp;
+ }
+ goto out;
+ }
+
+ /* Non-VMDq mode - RSS, DCB, RSS+DCB */
+ /* Init default VNIC for RSS or DCB only */
+ vnic = bnxt_alloc_vnic(bp);
+ if (!vnic) {
+ RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ /* Partition the rx queues for the single pool */
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ rxq = bp->eth_dev->data->rx_queues[i];
+ rxq->vnic = vnic;
+ }
+ STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
+ bp->nr_vnics++;
+
+ vnic->func_default = true;
+ vnic->ff_pool_idx = 0;
+ vnic->start_grp_id = 1;
+ vnic->end_grp_id = vnic->start_grp_id +
+ bp->rx_cp_nr_rings - 1;
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ vnic->hash_type =
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+
+out:
+ return rc;
+
+err_out:
+ /* Free allocated vnic/filters */
+
+ return rc;
+}
+
+static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_sw_rx_bd *sw_ring;
+ uint16_t i;
+
+ if (rxq) {
+ sw_ring = rxq->rx_ring->rx_buf_ring;
+ if (sw_ring) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+ }
+}
+
+void bnxt_free_rx_mbufs(struct bnxt *bp)
+{
+ struct bnxt_rx_queue *rxq;
+ int i;
+
+ for (i = 0; i < (int)bp->rx_nr_rings; i++) {
+ rxq = bp->rx_queues[i];
+ bnxt_rx_queue_release_mbufs(rxq);
+ }
+}
+
+void bnxt_rx_queue_release_op(void *rx_queue)
+{
+ struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+
+ if (rxq) {
+ bnxt_rx_queue_release_mbufs(rxq);
+
+ /* Free RX ring hardware descriptors */
+ bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+
+ /* Free RX completion ring hardware descriptors */
+ bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+
+ bnxt_free_rxq_stats(rxq);
+
+ rte_free(rxq);
+ }
+}
+
+int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_rx_queue *rxq;
+ int rc = 0;
+
+ if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
+ RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_idx];
+ if (rxq)
+ bnxt_rx_queue_release_op(rxq);
+ }
+ rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq) {
+ RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+ rc = -ENOMEM;
+ goto out;
+ }
+ rxq->bp = bp;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+
+ rc = bnxt_init_rx_ring_struct(rxq, socket_id);
+ if (rc)
+ goto out;
+
+ rxq->queue_id = queue_idx;
+ rxq->port_id = eth_dev->data->port_id;
+ rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ?
+ 0 : ETHER_CRC_LEN);
+
+ eth_dev->data->rx_queues[queue_idx] = rxq;
+ /* Allocate RX ring hardware descriptors */
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
+ "rxr")) {
+ RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+ bnxt_rx_queue_release_op(rxq);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+out:
+ return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
new file mode 100644
index 00000000..95543298
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_RXQ_H_
+#define _BNXT_RXQ_H_
+
+struct bnxt;
+struct bnxt_rx_ring_info;
+struct bnxt_cp_ring_info;
+struct bnxt_rx_queue {
+ struct rte_mempool *mb_pool; /* mbuf pool for RX ring */
+ struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */
+ struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */
+ uint64_t mbuf_initializer; /* val to init mbuf */
+ uint16_t nb_rx_desc; /* num of RX desc */
+ uint16_t rx_tail; /* cur val of RDT register */
+ uint16_t nb_rx_hold; /* num held free RX desc */
+ uint16_t rx_free_thresh; /* max free RX desc to hold */
+ uint16_t queue_id; /* RX queue index */
+ uint16_t reg_idx; /* RX queue register index */
+ uint8_t port_id; /* Device port identifier */
+ uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+
+ struct bnxt *bp;
+ struct bnxt_vnic_info *vnic;
+
+ uint32_t rx_buf_size;
+ uint32_t rx_buf_use_size; /* useable size */
+ struct bnxt_rx_ring_info *rx_ring;
+ struct bnxt_cp_ring_info *cp_ring;
+};
+
+void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
+int bnxt_mq_rx_configure(struct bnxt *bp);
+void bnxt_rx_queue_release_op(void *rx_queue);
+int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+void bnxt_free_rx_mbufs(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
new file mode 100644
index 00000000..5d93de26
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -0,0 +1,367 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_ring.h"
+#include "bnxt_rxr.h"
+#include "bnxt_rxq.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * RX Ring handling
+ */
+
+static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
+{
+ struct rte_mbuf *data;
+
+ data = rte_mbuf_raw_alloc(mb);
+
+ return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
+ struct bnxt_rx_ring_info *rxr,
+ uint16_t prod)
+{
+ struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+ struct rte_mbuf *data;
+
+ data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->mbuf = data;
+
+ rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+
+ return 0;
+}
+
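+/*
+ * On an RX error the original mbuf is put back at the current producer
+ * slot, reusing its existing DMA address instead of allocating a
+ * replacement buffer.
+ */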
+static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+ struct rte_mbuf *mbuf)
+{
+ uint16_t prod = rxr->rx_prod;
+ struct bnxt_sw_rx_bd *prod_rx_buf;
+ struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+
+ prod_rx_buf = &rxr->rx_buf_ring[prod];
+
+ prod_rx_buf->mbuf = mbuf;
+
+ prod_bd = &rxr->rx_desc_ring[prod];
+ cons_bd = &rxr->rx_desc_ring[cons];
+
+ prod_bd->addr = cons_bd->addr;
+}
+
+static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
+ struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rx_pkt_cmpl_hi *rxcmp1;
+ uint32_t tmp_raw_cons = *raw_cons;
+ uint16_t cons, prod, cp_cons =
+ RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct rte_mbuf *mbuf;
+ int rc = 0;
+
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+
+ tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
+ rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
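+ /*
+ * An RX packet spans two completion descriptors; only once the second
+ * one passes CMP_VALID has the hardware published the whole entry.
+ */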
+
+ if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
+ return -EBUSY;
+
+ prod = rxr->rx_prod;
+
+ /* EW - GRO deferred to phase 3 */
+ cons = rxcmp->opaque;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mbuf = rx_buf->mbuf;
+ rte_prefetch0(mbuf);
+
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rxcmp->len;
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = 0;
+ if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
+ mbuf->hash.rss = rxcmp->rss_hash;
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rxcmp1->cfa_code;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+ if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
+ mbuf->vlan_tci = rxcmp1->metadata &
+ (RX_PKT_CMPL_METADATA_VID_MASK |
+ RX_PKT_CMPL_METADATA_DE |
+ RX_PKT_CMPL_METADATA_PRI_MASK);
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ }
+
+ rx_buf->mbuf = NULL;
+ if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
+ /* Re-install the mbuf back to the rx ring */
+ bnxt_reuse_rx_mbuf(rxr, cons, mbuf);
+
+ rc = -EIO;
+ goto next_rx;
+ }
+ /*
+ * TODO: Redesign this....
+ * If the allocation fails, the packet does not get received.
+ * Simply returning this will result in slowly falling behind
+ * on the producer ring buffers.
+ * Instead, "filling up" the producer just before ringing the
+ * doorbell could be a better solution since it will let the
+ * producer ring starve until memory is available again pushing
+ * the drops into hardware and getting them out of the driver
+ * allowing recovery to a full producer ring.
+ *
+ * This could also help with cache usage by preventing per-packet
+ * calls in favour of a tight loop with the same function being called
+ * in it.
+ */
+ if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
+ RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+
+ /*
+ * All MBUFs are allocated with the same size under DPDK,
+ * no optimization for rx_copy_thresh
+ */
+
+ /* AGG buf operation is deferred */
+
+ /* EW - VLAN reception. Must compare against the ol_flags */
+
+ *rx_pkt = mbuf;
+next_rx:
+ rxr->rx_prod = RING_NEXT(rxr->rx_ring_struct, prod);
+
+ *raw_cons = tmp_raw_cons;
+
+ return rc;
+}
+
+uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct bnxt_rx_queue *rxq = rx_queue;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint32_t raw_cons = cpr->cp_raw_cons;
+ uint32_t cons;
+ int nb_rx_pkts = 0;
+ bool rx_event = false;
+ struct rx_pkt_cmpl *rxcmp;
+
+ /* Handle RX burst request */
+ while (1) {
+ int rc;
+
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ rte_prefetch0(&cpr->cp_desc_ring[cons]);
+ rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ break;
+
+ /* TODO: Avoid magic numbers... */
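+ /* (CMP_TYPE & 0x30) == 0x10 matches completion types 0x10-0x1f, handled here as RX packet completions. */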
+ if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
+ rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
+ if (likely(!rc))
+ nb_rx_pkts++;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ rx_event = true;
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ if (nb_rx_pkts == nb_pkts)
+ break;
+ }
+ if (raw_cons == cpr->cp_raw_cons) {
+ /*
+ * No new completions were processed, so there is no need to
+ * re-arm the completion ring doorbell.
+ */
+ return nb_rx_pkts;
+ }
+ cpr->cp_raw_cons = raw_cons;
+
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ if (rx_event)
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ return nb_rx_pkts;
+}
+
+void bnxt_free_rx_rings(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < (int)bp->rx_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+ rte_free(rxq->rx_ring->rx_ring_struct);
+ rte_free(rxq->rx_ring);
+
+ bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+ rte_free(rxq->cp_ring->cp_ring_struct);
+ rte_free(rxq->cp_ring);
+
+ rte_free(rxq);
+ bp->rx_queues[i] = NULL;
+ }
+}
+
+int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
+{
+ struct bnxt *bp = rxq->bp;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring *ring;
+
+ rxq->rx_buf_use_size = bp->eth_dev->data->mtu +
+ ETHER_HDR_LEN + ETHER_CRC_LEN +
+ (2 * VLAN_TAG_SIZE);
+ rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);
+
+ rxr = rte_zmalloc_socket("bnxt_rx_ring",
+ sizeof(struct bnxt_rx_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxr == NULL)
+ return -ENOMEM;
+ rxq->rx_ring = rxr;
+
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ rxr->rx_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)rxr->rx_desc_ring;
+ ring->bd_dma = rxr->rx_desc_mapping;
+ ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
+ ring->vmem = (void **)&rxr->rx_buf_ring;
+
+ cpr = rte_zmalloc_socket("bnxt_rx_ring",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+ rxq->cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ cpr->cp_ring_struct = ring;
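+ /* Each received packet consumes two completion entries, so size the CP ring at twice the RX ring. */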
+ ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)cpr->cp_desc_ring;
+ ring->bd_dma = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+
+ return 0;
+}
+
+static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
+ uint16_t len)
+{
+ uint32_t j;
+ struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;
+
+ if (!rx_bd_ring)
+ return;
+ for (j = 0; j < ring->ring_size; j++) {
+ rx_bd_ring[j].flags_type = type;
+ rx_bd_ring[j].len = len;
+ rx_bd_ring[j].opaque = j;
+ }
+}
+
+int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring *ring;
+ uint32_t prod, type;
+ unsigned int i;
+
+ type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
+
+ rxr = rxq->rx_ring;
+ ring = rxr->rx_ring_struct;
+ bnxt_init_rxbds(ring, type, rxq->rx_buf_use_size);
+
+ prod = rxr->rx_prod;
+ for (i = 0; i < ring->ring_size; i++) {
+ if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
+ RTE_LOG(WARNING, PMD,
+ "init'ed rx ring %d with %d/%d mbufs only\n",
+ rxq->queue_id, i, ring->ring_size);
+ break;
+ }
+ rxr->rx_prod = prod;
+ prod = RING_NEXT(rxr->rx_ring_struct, prod);
+ }
+
+ return 0;
+}
diff --git a/app/test-pmd/mempool_osdep.h b/drivers/net/bnxt/bnxt_rxr.h
index 6b8df68a..f766b26c 100644
--- a/app/test-pmd/mempool_osdep.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) Broadcom Limited.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
+ * * Neither the name of Broadcom Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,24 +31,32 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _MEMPOOL_OSDEP_H_
-#define _MEMPOOL_OSDEP_H_
+#ifndef _BNXT_RXR_H_
+#define _BNXT_RXR_H_
-#include <rte_mempool.h>
+#define B_RX_DB(db, prod) \
+ (*(uint32_t *)db = (DB_KEY_RX | prod))
-/**
- * @file
- * mempool OS specific header.
- */
+struct bnxt_sw_rx_bd {
+ struct rte_mbuf *mbuf; /* data associated with RX descriptor */
+};
-/*
- * Create mempool over objects from mmap(..., MAP_ANONYMOUS, ...).
- */
-struct rte_mempool *
-mempool_anon_create(const char *name, unsigned n, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags);
-
-#endif /*_RTE_MEMPOOL_OSDEP_H_ */
+struct bnxt_rx_ring_info {
+ uint16_t rx_prod;
+ void *rx_doorbell;
+
+ struct rx_prod_pkt_bd *rx_desc_ring;
+ struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */
+
+ phys_addr_t rx_desc_mapping;
+
+ struct bnxt_ring *rx_ring_struct;
+};
+
+uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void bnxt_free_rx_rings(struct bnxt *bp);
+int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
+int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
new file mode 100644
index 00000000..6f1c7602
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -0,0 +1,142 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_rxq.h"
+#include "bnxt_stats.h"
+#include "bnxt_txq.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Statistics functions
+ */
+
+void bnxt_free_stats(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < (int)bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+ bnxt_free_txq_stats(txq);
+ }
+ for (i = 0; i < (int)bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+ bnxt_free_rxq_stats(rxq);
+ }
+}
+
+void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *bnxt_stats)
+{
+ unsigned int i;
+ struct bnxt *bp = eth_dev->data->dev_private;
+
+ memset(bnxt_stats, 0, sizeof(*bnxt_stats));
+
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct ctx_hw_stats64 *hw_stats =
+ (struct ctx_hw_stats64 *)cpr->hw_stats;
+
+ bnxt_stats->q_ipackets[i] +=
+ rte_le_to_cpu_64(hw_stats->rx_ucast_pkts);
+ bnxt_stats->q_ipackets[i] +=
+ rte_le_to_cpu_64(hw_stats->rx_mcast_pkts);
+ bnxt_stats->q_ipackets[i] +=
+ rte_le_to_cpu_64(hw_stats->rx_bcast_pkts);
+
+ bnxt_stats->q_ibytes[i] +=
+ rte_le_to_cpu_64(hw_stats->rx_ucast_bytes);
+ bnxt_stats->q_ibytes[i] +=
+ rte_le_to_cpu_64(hw_stats->rx_mcast_bytes);
+ bnxt_stats->q_ibytes[i] +=
+ rte_le_to_cpu_64(hw_stats->rx_bcast_bytes);
+
+ /*
+ * TBD: No clear mapping to this... we don't seem
+ * to have a stat specifically for dropped due to
+ * insufficient mbufs.
+ */
+ bnxt_stats->q_errors[i] = 0;
+
+ /* These get replaced once the *_QSTATS commands work */
+ bnxt_stats->ipackets += bnxt_stats->q_ipackets[i];
+ bnxt_stats->ibytes += bnxt_stats->q_ibytes[i];
+ bnxt_stats->imissed += bnxt_stats->q_errors[i];
+ bnxt_stats->ierrors +=
+ rte_le_to_cpu_64(hw_stats->rx_err_pkts);
+ }
+
+ for (i = 0; i < bp->tx_cp_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ struct ctx_hw_stats64 *hw_stats =
+ (struct ctx_hw_stats64 *)cpr->hw_stats;
+
+ bnxt_stats->q_opackets[i] +=
+ rte_le_to_cpu_64(hw_stats->tx_ucast_pkts);
+ bnxt_stats->q_opackets[i] +=
+ rte_le_to_cpu_64(hw_stats->tx_mcast_pkts);
+ bnxt_stats->q_opackets[i] +=
+ rte_le_to_cpu_64(hw_stats->tx_bcast_pkts);
+
+ bnxt_stats->q_obytes[i] +=
+ rte_le_to_cpu_64(hw_stats->tx_ucast_bytes);
+ bnxt_stats->q_obytes[i] +=
+ rte_le_to_cpu_64(hw_stats->tx_mcast_bytes);
+ bnxt_stats->q_obytes[i] +=
+ rte_le_to_cpu_64(hw_stats->tx_bcast_bytes);
+
+ /* These get replaced once the *_QSTATS commands work */
+ bnxt_stats->opackets += bnxt_stats->q_opackets[i];
+ bnxt_stats->obytes += bnxt_stats->q_obytes[i];
+ bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts);
+ bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_err_pkts);
+ }
+}
+
+void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+
+ bnxt_clear_all_hwrm_stat_ctxs(bp);
+}
diff --git a/lib/librte_vhost/virtio-net.h b/drivers/net/bnxt/bnxt_stats.h
index 75fb57e5..65408a44 100644
--- a/lib/librte_vhost/virtio-net.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014-2015 Broadcom Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
+ * * Neither the name of Broadcom Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,13 +31,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _VIRTIO_NET_H
-#define _VIRTIO_NET_H
+#ifndef _BNXT_STATS_H_
+#define _BNXT_STATS_H_
-#include "vhost-net.h"
-#include "rte_virtio_net.h"
+#include <rte_ethdev.h>
-struct virtio_net_device_ops const *notify_ops;
-struct virtio_net *get_device(struct vhost_device_ctx ctx);
+void bnxt_free_stats(struct bnxt *bp);
+void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *bnxt_stats);
+void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev);
#endif
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
new file mode 100644
index 00000000..99dddddf
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -0,0 +1,162 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_ring.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+
+/*
+ * TX Queues
+ */
+
+void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
+{
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+
+	cpr->hw_stats = NULL;
+}
+
+static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
+{
+ struct bnxt_sw_tx_bd *sw_ring;
+ uint16_t i;
+
+ sw_ring = txq->tx_ring->tx_buf_ring;
+ if (sw_ring) {
+ for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+void bnxt_free_tx_mbufs(struct bnxt *bp)
+{
+ struct bnxt_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < (int)bp->tx_nr_rings; i++) {
+ txq = bp->tx_queues[i];
+ bnxt_tx_queue_release_mbufs(txq);
+ }
+}
+
+void bnxt_tx_queue_release_op(void *tx_queue)
+{
+ struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+
+ if (txq) {
+ /* Free TX ring hardware descriptors */
+ bnxt_tx_queue_release_mbufs(txq);
+ bnxt_free_ring(txq->tx_ring->tx_ring_struct);
+
+ /* Free TX completion ring hardware descriptors */
+ bnxt_free_ring(txq->cp_ring->cp_ring_struct);
+
+ bnxt_free_txq_stats(txq);
+
+ rte_free(txq);
+ }
+}
+
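+/*
+ * Set up one TX queue: validate nb_desc, release any queue previously
+ * configured at this index, then allocate the queue structure, the ring
+ * bookkeeping structures, and the backing hardware rings. Any failure
+ * unwinds through bnxt_tx_queue_release_op() and returns a negative errno.
+ */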
+int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_tx_queue *txq;
+ int rc = 0;
+
+ if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
+ RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (eth_dev->data->tx_queues) {
+ txq = eth_dev->data->tx_queues[queue_idx];
+ if (txq) {
+ bnxt_tx_queue_release_op(txq);
+ txq = NULL;
+ }
+ }
+ txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!txq) {
+ RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
+ rc = -ENOMEM;
+ goto out;
+ }
+ txq->bp = bp;
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_free_thresh = tx_conf->tx_free_thresh;
+
+ rc = bnxt_init_tx_ring_struct(txq, socket_id);
+ if (rc)
+ goto out;
+
+ txq->queue_id = queue_idx;
+ txq->port_id = eth_dev->data->port_id;
+
+ /* Allocate TX ring hardware descriptors */
+ if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
+ "txr")) {
+ RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
+ bnxt_tx_queue_release_op(txq);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (bnxt_init_one_tx_ring(txq)) {
+ RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
+ bnxt_tx_queue_release_op(txq);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ eth_dev->data->tx_queues[queue_idx] = txq;
+
+out:
+ return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
new file mode 100644
index 00000000..16f3a0bd
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -0,0 +1,75 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_TXQ_H_
+#define _BNXT_TXQ_H_
+
+struct bnxt_tx_ring_info;
+struct bnxt_cp_ring_info;
+struct bnxt_tx_queue {
+ uint16_t nb_tx_desc; /* number of TX descriptors */
+ uint16_t tx_free_thresh;/* minimum TX before freeing */
+ /** Index to last TX descriptor to have been cleaned. */
+ uint16_t last_desc_cleaned;
+	uint16_t tx_next_dd; /* next desc to scan for DD bit */
+ uint16_t tx_next_rs; /* next desc to set RS bit */
+ uint16_t queue_id; /* TX queue index */
+ uint16_t reg_idx; /* TX queue register index */
+ uint8_t port_id; /* Device port identifier */
+ uint8_t pthresh; /* Prefetch threshold register */
+ uint8_t hthresh; /* Host threshold register */
+ uint8_t wthresh; /* Write-back threshold reg */
+ uint32_t txq_flags; /* Holds flags for this TXq */
+ uint32_t ctx_curr; /* Hardware context states */
+ uint8_t tx_deferred_start; /* not in global dev start */
+
+ struct bnxt *bp;
+ int index;
+ int tx_wake_thresh;
+ struct bnxt_tx_ring_info *tx_ring;
+
+ unsigned int cp_nr_rings;
+ struct bnxt_cp_ring_info *cp_ring;
+};
+
+void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
+void bnxt_free_tx_mbufs(struct bnxt *bp);
+void bnxt_tx_queue_release_op(void *tx_queue);
+int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
new file mode 100644
index 00000000..8bf8fee3
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -0,0 +1,339 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_ring.h"
+#include "bnxt_txq.h"
+#include "bnxt_txr.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * TX Ring handling
+ */
+
+void bnxt_free_tx_rings(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < (int)bp->tx_nr_rings; i++) {
+ struct bnxt_tx_queue *txq = bp->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ bnxt_free_ring(txq->tx_ring->tx_ring_struct);
+ rte_free(txq->tx_ring->tx_ring_struct);
+ rte_free(txq->tx_ring);
+
+ bnxt_free_ring(txq->cp_ring->cp_ring_struct);
+ rte_free(txq->cp_ring->cp_ring_struct);
+ rte_free(txq->cp_ring);
+
+ rte_free(txq);
+ bp->tx_queues[i] = NULL;
+ }
+}
+
+int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct bnxt_ring *ring = txr->tx_ring_struct;
+
+ txq->tx_wake_thresh = ring->ring_size / 2;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ return 0;
+}
+
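+/*
+ * The ring size is rounded up to the next power of two (one extra slot is
+ * reserved so a full ring can be told apart from an empty one), which lets
+ * ring_mask = ring_size - 1 replace a modulo when indexing. For example, a
+ * hypothetical nb_tx_desc of 200 yields ring_size 256 and ring_mask 0xff.
+ */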
+int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
+{
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_ring *ring;
+
+ txr = rte_zmalloc_socket("bnxt_tx_ring",
+ sizeof(struct bnxt_tx_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txr == NULL)
+ return -ENOMEM;
+ txq->tx_ring = txr;
+
+ ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ txr->tx_ring_struct = ring;
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)txr->tx_desc_ring;
+ ring->bd_dma = txr->tx_desc_mapping;
+ ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
+ ring->vmem = (void **)&txr->tx_buf_ring;
+
+ cpr = rte_zmalloc_socket("bnxt_tx_ring",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cpr == NULL)
+ return -ENOMEM;
+ txq->cp_ring = cpr;
+
+ ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL)
+ return -ENOMEM;
+ cpr->cp_ring_struct = ring;
+ ring->ring_size = txr->tx_ring_struct->ring_size;
+ ring->ring_mask = ring->ring_size - 1;
+ ring->bd = (void *)cpr->cp_desc_ring;
+ ring->bd_dma = cpr->cp_desc_mapping;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+
+ return 0;
+}
+
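+/*
+ * Free BD slots are ring_size minus the outstanding descriptor count
+ * ((tx_prod - tx_cons) & ring_mask) minus one reserved slot. E.g., with a
+ * hypothetical ring_size of 256, tx_prod = 10 and tx_cons = 250, the
+ * in-flight count is (10 - 250) & 0xff = 16, leaving 239 usable slots.
+ */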
+static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
+{
+ /* Tell compiler to fetch tx indices from memory. */
+ rte_compiler_barrier();
+
+ return txr->tx_ring_struct->ring_size -
+ ((txr->tx_prod - txr->tx_cons) &
+ txr->tx_ring_struct->ring_mask) - 1;
+}
+
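+/*
+ * Enqueue a single packet: the first segment uses a long BD (plus its
+ * tx_bd_long_hi half) when any offload flag is set, otherwise a short BD;
+ * each additional mbuf segment is chained with a short BD, and the final
+ * BD carries the PACKET_END flag.
+ */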
+static int16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
+ struct bnxt_tx_queue *txq)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct tx_bd_long *txbd;
+ struct tx_bd_long_hi *txbd1;
+ uint32_t vlan_tag_flags, cfa_action;
+ bool long_bd = false;
+ uint16_t last_prod = 0;
+ struct rte_mbuf *m_seg;
+ struct bnxt_sw_tx_bd *tx_buf;
+ static const uint32_t lhint_arr[4] = {
+ TX_BD_LONG_FLAGS_LHINT_LT512,
+ TX_BD_LONG_FLAGS_LHINT_LT1K,
+ TX_BD_LONG_FLAGS_LHINT_LT2K,
+ TX_BD_LONG_FLAGS_LHINT_LT2K
+ };
+
+ if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
+ PKT_TX_VLAN_PKT))
+ long_bd = true;
+
+	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
+
+	/* Bail out before claiming the slot if the ring lacks room */
+	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
+		return -ENOMEM;
+
+	tx_buf->mbuf = tx_pkt;
+	last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) &
+				txr->tx_ring_struct->ring_mask;
+
+ txbd = &txr->tx_desc_ring[txr->tx_prod];
+ txbd->opaque = txr->tx_prod;
+ txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
+ txbd->len = tx_pkt->data_len;
+ if (txbd->len >= 2014)
+ txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
+ else
+ txbd->flags_type |= lhint_arr[txbd->len >> 9];
+	txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));
+
+ if (long_bd) {
+ txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
+ vlan_tag_flags = 0;
+ cfa_action = 0;
+ if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+			/* shurd: Should this be masked with
+			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
+			 */
+ vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
+ tx_buf->mbuf->vlan_tci;
+			/* Currently supports 802.1Q and 802.1ad VLAN
+			 * offloads; the QINQ1, QINQ2 and QINQ3 VLAN
+			 * headers are deprecated.
+			 */
+			/* DPDK only supports 802.1Q VLAN packets */
+ vlan_tag_flags |=
+ TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
+ }
+
+ txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
+
+ txbd1 = (struct tx_bd_long_hi *)
+ &txr->tx_desc_ring[txr->tx_prod];
+ txbd1->lflags = 0;
+ txbd1->cfa_meta = vlan_tag_flags;
+ txbd1->cfa_action = cfa_action;
+
+ if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
+ /* TSO */
+ txbd1->lflags = TX_BD_LONG_LFLAGS_LSO;
+ txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
+ tx_pkt->l4_len;
+ txbd1->mss = tx_pkt->tso_segsz;
+
+ } else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM)) {
+ /* TCP/UDP CSO */
+ txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+
+ } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
+ /* IP CSO */
+ txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM;
+ txbd1->mss = 0;
+ }
+ } else {
+ txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
+ }
+
+ m_seg = tx_pkt->next;
+	/* Chain any remaining mbuf segments using short BDs */
+ while (txr->tx_prod != last_prod) {
+ txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
+ tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+
+ txbd = &txr->tx_desc_ring[txr->tx_prod];
+		txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m_seg));
+ txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
+ txbd->len = m_seg->data_len;
+
+ m_seg = m_seg->next;
+ }
+
+ txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
+
+ txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
+
+ return 0;
+}
+
+static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
+{
+ struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ uint16_t cons = txr->tx_cons;
+ int i, j;
+
+ for (i = 0; i < nr_pkts; i++) {
+ struct bnxt_sw_tx_bd *tx_buf;
+ struct rte_mbuf *mbuf;
+
+ tx_buf = &txr->tx_buf_ring[cons];
+ cons = RING_NEXT(txr->tx_ring_struct, cons);
+ mbuf = tx_buf->mbuf;
+ tx_buf->mbuf = NULL;
+
+ /* EW - no need to unmap DMA memory? */
+
+ for (j = 1; j < tx_buf->nr_bds; j++)
+ cons = RING_NEXT(txr->tx_ring_struct, cons);
+ rte_pktmbuf_free(mbuf);
+ }
+
+ txr->tx_cons = cons;
+}
+
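+/*
+ * Drain the TX completion ring once enough descriptors are outstanding to
+ * cross tx_free_thresh. CMP_VALID() keys off the valid bit that the NIC
+ * flips on every pass through the ring, so the loop stops at the first
+ * entry the hardware has not yet written.
+ */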
+static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
+{
+ struct bnxt_cp_ring_info *cpr = txq->cp_ring;
+ uint32_t raw_cons = cpr->cp_raw_cons;
+ uint32_t cons;
+ int nb_tx_pkts = 0;
+ struct tx_cmpl *txcmp;
+
+ if ((txq->tx_ring->tx_ring_struct->ring_size -
+ (bnxt_tx_avail(txq->tx_ring))) >
+ txq->tx_free_thresh) {
+ while (1) {
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
+
+ if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct))
+ break;
+
+ if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
+ nb_tx_pkts++;
+ else
+ RTE_LOG(DEBUG, PMD,
+ "Unhandled CMP type %02x\n",
+ CMP_TYPE(txcmp));
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ }
+ if (nb_tx_pkts)
+ bnxt_tx_cmp(txq, nb_tx_pkts);
+ cpr->cp_raw_cons = raw_cons;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ }
+ return nb_tx_pkts;
+}
+
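+/*
+ * Burst transmit: completions are reaped first to free ring space, then
+ * packets are enqueued. The doorbell is rung roughly every quarter of the
+ * ring (db_mask) while filling, and once more at the end so the NIC always
+ * sees the final producer index.
+ */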
+uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct bnxt_tx_queue *txq = tx_queue;
+ uint16_t nb_tx_pkts = 0;
+ uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2;
+ uint16_t last_db_mask = 0;
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ /* Handle TX burst request */
+ for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
+ if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
+ break;
+ } else if ((nb_tx_pkts & db_mask) != last_db_mask) {
+ B_TX_DB(txq->tx_ring->tx_doorbell,
+ txq->tx_ring->tx_prod);
+ last_db_mask = nb_tx_pkts & db_mask;
+ }
+ }
+ if (nb_tx_pkts)
+ B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);
+
+ return nb_tx_pkts;
+}
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
new file mode 100644
index 00000000..2176acaa
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -0,0 +1,71 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_TXR_H_
+#define _BNXT_TXR_H_
+
+#define MAX_TX_RINGS 16
+#define BNXT_TX_PUSH_THRESH 92
+
+#define B_TX_DB(db, prod) \
+ (*(uint32_t *)db = (DB_KEY_TX | prod))
+
+struct bnxt_tx_ring_info {
+ uint16_t tx_prod;
+ uint16_t tx_cons;
+ void *tx_doorbell;
+
+ struct tx_bd_long *tx_desc_ring;
+ struct bnxt_sw_tx_bd *tx_buf_ring;
+
+ phys_addr_t tx_desc_mapping;
+
+#define BNXT_DEV_STATE_CLOSING 0x1
+ uint32_t dev_state;
+
+ struct bnxt_ring *tx_ring_struct;
+};
+
+struct bnxt_sw_tx_bd {
+ struct rte_mbuf *mbuf; /* mbuf associated with TX descriptor */
+ uint8_t is_gso;
+ unsigned short nr_bds;
+};
+
+void bnxt_free_tx_rings(struct bnxt *bp);
+int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
+int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
+uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
new file mode 100644
index 00000000..c04c4c74
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -0,0 +1,277 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Broadcom Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_vnic.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * VNIC Functions
+ */
+
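+/*
+ * Fill a buffer with pseudo-random bytes, eight per rte_rand() call;
+ * used below to seed each VNIC's RSS hash key.
+ */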
+static void prandom_bytes(void *dest_ptr, size_t len)
+{
+ char *dest = (char *)dest_ptr;
+ uint64_t rb;
+
+ while (len) {
+ rb = rte_rand();
+ if (len >= 8) {
+ memcpy(dest, &rb, 8);
+ len -= 8;
+ dest += 8;
+ } else {
+ memcpy(dest, &rb, len);
+ dest += len;
+ len = 0;
+ }
+ }
+}
+
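+/*
+ * Put every VNIC the firmware allows (max_vnics differs between PF and VF)
+ * on the free list with invalid firmware IDs, and seed each RSS hash key
+ * with random bytes; bnxt_alloc_vnic() later hands them out from this list.
+ */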
+void bnxt_init_vnics(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ uint16_t max_vnics;
+ int i, j;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ max_vnics = pf->max_vnics;
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ max_vnics = vf->max_vnics;
+ }
+ STAILQ_INIT(&bp->free_vnic_list);
+ for (i = 0; i < max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
+
+ for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
+ vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
+
+ prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+ STAILQ_INIT(&vnic->filter);
+ STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next);
+ }
+ for (i = 0; i < MAX_FF_POOLS; i++)
+ STAILQ_INIT(&bp->ff_pool[i]);
+}
+
+int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int pool)
+{
+ struct bnxt_vnic_info *temp;
+
+ temp = STAILQ_FIRST(&bp->ff_pool[pool]);
+ while (temp) {
+ if (temp == vnic) {
+ STAILQ_REMOVE(&bp->ff_pool[pool], vnic,
+ bnxt_vnic_info, next);
+ vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE;
+ STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic,
+ next);
+ return 0;
+ }
+ temp = STAILQ_NEXT(temp, next);
+ }
+ RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool);
+ return -EINVAL;
+}
+
+struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+
+	/* Find the first unused VNIC from the free_vnic_list pool */
+ vnic = STAILQ_FIRST(&bp->free_vnic_list);
+ if (!vnic) {
+ RTE_LOG(ERR, PMD, "No more free VNIC resources\n");
+ return NULL;
+ }
+ STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
+ return vnic;
+}
+
+void bnxt_free_all_vnics(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *temp, *next;
+ int i;
+
+ for (i = 0; i < MAX_FF_POOLS; i++) {
+ temp = STAILQ_FIRST(&bp->ff_pool[i]);
+ while (temp) {
+ next = STAILQ_NEXT(temp, next);
+ STAILQ_REMOVE(&bp->ff_pool[i], temp, bnxt_vnic_info,
+ next);
+ STAILQ_INSERT_TAIL(&bp->free_vnic_list, temp, next);
+ temp = next;
+ }
+ }
+}
+
+void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+
+ STAILQ_FOREACH(vnic, &bp->free_vnic_list, next) {
+ if (vnic->rss_table) {
+ /* 'Unreserve' the rss_table */
+ /* N/A */
+
+ vnic->rss_table = NULL;
+ }
+
+ if (vnic->rss_hash_key) {
+ /* 'Unreserve' the rss_hash_key */
+ /* N/A */
+
+ vnic->rss_hash_key = NULL;
+ }
+ }
+}
+
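+/*
+ * One memzone backs all VNIC attribute blocks: VNIC i gets the
+ * entry_length-sized slice at offset i, holding its RSS indirection table
+ * (HW_HASH_INDEX_SIZE entries) immediately followed by its
+ * HW_HASH_KEY_SIZE-byte hash key, with the DMA addresses derived from the
+ * same offsets.
+ */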
+int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ struct rte_pci_device *pdev = bp->pdev;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int entry_length = RTE_CACHE_LINE_ROUNDUP(
+ HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
+ HW_HASH_KEY_SIZE);
+ uint16_t max_vnics;
+ int i;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ max_vnics = pf->max_vnics;
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ max_vnics = vf->max_vnics;
+ }
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
+ "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+ mz = rte_memzone_lookup(mz_name);
+ if (!mz) {
+ mz = rte_memzone_reserve(mz_name,
+ entry_length * max_vnics,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY);
+ if (!mz)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ /* Allocate rss table and hash key */
+ vnic->rss_table =
+ (void *)((char *)mz->addr + (entry_length * i));
+ memset(vnic->rss_table, -1, entry_length);
+
+ vnic->rss_table_dma_addr = mz->phys_addr + (entry_length * i);
+ vnic->rss_hash_key = (void *)((char *)vnic->rss_table +
+ HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table));
+
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr +
+ HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table);
+ }
+
+ return 0;
+}
+
+void bnxt_free_vnic_mem(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic;
+ uint16_t max_vnics, i;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ max_vnics = pf->max_vnics;
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ max_vnics = vf->max_vnics;
+ }
+ for (i = 0; i < max_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
+ RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n");
+ /* TODO Call HWRM to free VNIC */
+ }
+ }
+
+ rte_free(bp->vnic_info);
+ bp->vnic_info = NULL;
+}
+
+int bnxt_alloc_vnic_mem(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic_mem;
+ uint16_t max_vnics;
+
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ max_vnics = pf->max_vnics;
+ } else {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ max_vnics = vf->max_vnics;
+ }
+ /* Allocate memory for VNIC pool and filter pool */
+ vnic_mem = rte_zmalloc("bnxt_vnic_info",
+ max_vnics * sizeof(struct bnxt_vnic_info), 0);
+ if (vnic_mem == NULL) {
+ RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs",
+ max_vnics);
+ return -ENOMEM;
+ }
+ bp->vnic_info = vnic_mem;
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
new file mode 100644
index 00000000..9671ba42
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -0,0 +1,80 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Broadcom Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_VNIC_H_
+#define _BNXT_VNIC_H_
+
+#include <sys/queue.h>
+#include <stdbool.h>
+
+struct bnxt_vnic_info {
+ STAILQ_ENTRY(bnxt_vnic_info) next;
+ uint8_t ff_pool_idx;
+
+ uint16_t fw_vnic_id; /* returned by Chimp during alloc */
+ uint16_t fw_rss_cos_lb_ctx;
+ uint16_t ctx_is_rss_cos_lb;
+#define MAX_NUM_TRAFFIC_CLASSES 8
+#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
+#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
+ MAX_NUM_TRAFFIC_CLASSES)
+ uint16_t start_grp_id;
+ uint16_t end_grp_id;
+ uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
+ uint16_t hash_type;
+ phys_addr_t rss_table_dma_addr;
+ uint16_t *rss_table;
+ phys_addr_t rss_hash_key_dma_addr;
+ void *rss_hash_key;
+ uint32_t flags;
+#define BNXT_VNIC_INFO_PROMISC (1 << 0)
+#define BNXT_VNIC_INFO_ALLMULTI (1 << 1)
+
+ bool vlan_strip;
+ bool func_default;
+
+ STAILQ_HEAD(, bnxt_filter_info) filter;
+};
+
+struct bnxt;
+void bnxt_init_vnics(struct bnxt *bp);
+int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int pool);
+struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp);
+void bnxt_free_all_vnics(struct bnxt *bp);
+void bnxt_free_vnic_attributes(struct bnxt *bp);
+int bnxt_alloc_vnic_attributes(struct bnxt *bp);
+void bnxt_free_vnic_mem(struct bnxt *bp);
+int bnxt_alloc_vnic_mem(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
new file mode 100644
index 00000000..f2db3eaf
--- /dev/null
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -0,0 +1,5799 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HSI_STRUCT_DEF_EXTERNAL_H_
+#define _HSI_STRUCT_DEF_EXTERNAL_H_
+
+/*
+ * per-context HW statistics -- chip view
+ */
+
+struct ctx_hw_stats64 {
+ uint64_t rx_ucast_pkts;
+ uint64_t rx_mcast_pkts;
+ uint64_t rx_bcast_pkts;
+ uint64_t rx_drop_pkts;
+ uint64_t rx_err_pkts;
+ uint64_t rx_ucast_bytes;
+ uint64_t rx_mcast_bytes;
+ uint64_t rx_bcast_bytes;
+
+ uint64_t tx_ucast_pkts;
+ uint64_t tx_mcast_pkts;
+ uint64_t tx_bcast_pkts;
+ uint64_t tx_drop_pkts;
+ uint64_t tx_err_pkts;
+ uint64_t tx_ucast_bytes;
+ uint64_t tx_mcast_bytes;
+ uint64_t tx_bcast_bytes;
+
+ uint64_t tpa_pkts;
+ uint64_t tpa_bytes;
+ uint64_t tpa_events;
+ uint64_t tpa_aborts;
+};
+
+/* HW Resource Manager Specification 1.2.0 */
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 2
+#define HWRM_VERSION_UPDATE 0
+
+/*
+ * Following is the signature for an HWRM message field that indicates not
+ * applicable (all F's). Cast it to the size of the field if needed.
+ */
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
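+/* e.g. cast to a 16-bit field: (uint16_t)HWRM_NA_SIGNATURE == 0xffff */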
+#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
+
+/*
+ * Request types
+ */
+#define HWRM_VER_GET (UINT32_C(0x0))
+#define HWRM_FUNC_RESET (UINT32_C(0x11))
+#define HWRM_FUNC_QCAPS (UINT32_C(0x15))
+#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a))
+#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d))
+#define HWRM_PORT_PHY_CFG (UINT32_C(0x20))
+#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27))
+#define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30))
+#define HWRM_VNIC_ALLOC (UINT32_C(0x40))
+#define HWRM_VNIC_FREE (UINT32_C(0x41))
+#define HWRM_VNIC_CFG (UINT32_C(0x42))
+#define HWRM_VNIC_RSS_CFG (UINT32_C(0x46))
+#define HWRM_RING_ALLOC (UINT32_C(0x50))
+#define HWRM_RING_FREE (UINT32_C(0x51))
+#define HWRM_RING_GRP_ALLOC (UINT32_C(0x60))
+#define HWRM_RING_GRP_FREE (UINT32_C(0x61))
+#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70))
+#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (UINT32_C(0x71))
+#define HWRM_CFA_L2_FILTER_ALLOC (UINT32_C(0x90))
+#define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91))
+#define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92))
+#define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93))
+#define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0))
+#define HWRM_STAT_CTX_FREE (UINT32_C(0xb1))
+#define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3))
+#define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0))
+
+/* Return Codes */
+#define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2))
+#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3))
+
+/* Short TX BD (16 bytes) */
+struct tx_bd_short {
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs of a
+ * packet.
+ */
+ /* This value identifies the type of buffer descriptor. */
+ #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_SHORT_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is used for normal L2
+ * packet transmission.
+ */
+ #define TX_BD_SHORT_TYPE_TX_BD_SHORT (UINT32_C(0x0) << 0)
+ /*
+ * If set to 1, the packet ends with the data in the buffer pointed to
+ * by this descriptor. This flag must be valid on every BD.
+ */
+ #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
+ /*
+ * If set to 1, the device will not generate a completion for this
+	 * transmit packet unless there is an error in its processing. If this
+ * bit is set to 0, then the packet will be completed normally. This bit
+ * must be valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
+ /*
+ * This value indicates how many 16B BD locations are consumed in the
+ * ring by this packet. A value of 1 indicates that this BD is the only
+	 * BD (and that it is a short BD). A value of 3 indicates either 3
+ * short BDs or 1 long BD and one short BD in the packet. A value of 0
+ * indicates that there are 32 BD locations in the packet (the maximum).
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet. It is used
+ * by the chip to optimize internal processing. The packet will be
+ * dropped if the hint is too short. This field is valid only on the
+ * first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
+ /* indicates packet length < 512B */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ /* indicates 512 <= packet length < 1KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ /* indicates 1KB <= packet length < 2KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ /* indicates packet length >= 2KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K
+ /*
+ * If set to 1, the device immediately updates the Send Consumer Index
+ * after the buffer associated with this descriptor has been transferred
+ * via DMA to NIC memory from host memory. An interrupt may or may not
+ * be generated according to the state of the interrupt avoidance
+ * mechanisms. If this bit is set to 0, then the Consumer Index is only
+	 * updated once one of the host interrupt coalescing conditions
+ * has been met. This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs of a
+ * packet.
+ */
+ #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_SHORT_FLAGS_SFT 6
+ uint16_t flags_type;
+
+ /*
+ * This is the length of the host physical buffer this BD describes in
+ * bytes. This field must be valid on all BDs of a packet.
+ */
+ uint16_t len;
+ /*
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with the
+ * transmit BD. This field must be valid on the first BD of a packet.
+ */
+ uint32_t opaque;
+
+ /*
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD. This value must be valid on all BDs of a
+ * packet.
+ */
+ uint64_t addr;
+} __attribute__((packed));
+
+/* Long TX BD (32 bytes, split into two 16-byte structs) */
+struct tx_bd_long {
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs of a
+ * packet.
+ */
+ /* This value identifies the type of buffer descriptor. */
+ #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_LONG_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 32B long and is used for normal L2
+ * packet transmission.
+ */
+ #define TX_BD_LONG_TYPE_TX_BD_LONG (UINT32_C(0x10) << 0)
+ /*
+ * If set to 1, the packet ends with the data in the buffer pointed to
+ * by this descriptor. This flag must be valid on every BD.
+ */
+ #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
+ /*
+ * If set to 1, the device will not generate a completion for this
+	 * transmit packet unless there is an error in its processing. If this
+ * bit is set to 0, then the packet will be completed normally. This bit
+ * must be valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
+ /*
+ * This value indicates how many 16B BD locations are consumed in the
+ * ring by this packet. A value of 1 indicates that this BD is the only
+	 * BD (and that it is a short BD). A value of 3 indicates either 3
+ * short BDs or 1 long BD and one short BD in the packet. A value of 0
+ * indicates that there are 32 BD locations in the packet (the maximum).
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet. It is used
+ * by the chip to optimize internal processing. The packet will be
+ * dropped if the hint is too short. This field is valid only on the
+ * first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_LONG_FLAGS_LHINT_SFT 13
+ /* indicates packet length < 512B */
+ #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ /* indicates 512 <= packet length < 1KB */
+ #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ /* indicates 1KB <= packet length < 2KB */
+ #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ /* indicates packet length >= 2KB */
+ #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
+ /*
+ * If set to 1, the device immediately updates the Send Consumer Index
+ * after the buffer associated with this descriptor has been transferred
+ * via DMA to NIC memory from host memory. An interrupt may or may not
+ * be generated according to the state of the interrupt avoidance
+ * mechanisms. If this bit is set to 0, then the Consumer Index is only
+	 * updated once one of the host interrupt coalescing conditions
+ * has been met. This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs of a
+ * packet.
+ */
+ #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_LONG_FLAGS_SFT 6
+ uint16_t flags_type;
+
+ /*
+ * This is the length of the host physical buffer this BD describes in
+ * bytes. This field must be valid on all BDs of a packet.
+ */
+ uint16_t len;
+
+ /*
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with the
+ * transmit BD. This field must be valid on the first BD of a packet.
+ */
+ uint32_t opaque;
+
+ /*
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD. This value must be valid on all BDs of a
+ * packet.
+ */
+ uint64_t addr;
+} __attribute__((packed));
+
+/* last 16 bytes of Long TX BD */
+
+struct tx_bd_long_hi {
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Their value on other BDs of the packet will be ignored.
+ */
+ /*
+	 * If set to 1, the controller replaces the TCP/UDP checksum field of
+	 * normal TCP/UDP packets, or the inner TCP/UDP checksum field of
+	 * encapsulated TCP/UDP packets, with the hardware calculated TCP/UDP
+ * checksum for the packet associated with this descriptor. This bit
+ * must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+ /*
+ * If set to 1, the controller replaces the IP checksum of the normal
+ * packets, or the inner IP checksum of the encapsulated packets with
+ * the hardware calculated IP checksum for the packet associated with
+ * this descriptor. This bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ /*
+ * If set to 1, the controller will not append an Ethernet CRC to the
+ * end of the frame. This bit must be valid on the first BD of a packet.
+ * Packet must be 64B or longer when this flag is set. It is not useful
+ * to use this bit with any form of TX offload such as CSO or LSO. The
+ * intent is that the packet from the host already has a valid Ethernet
+ * CRC on the packet.
+ */
+ #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
+ /*
+ * If set to 1, the device will record the time at which the packet was
+ * actually transmitted at the TX MAC. This bit must be valid on the
+ * first BD of a packet.
+ */
+ #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
+ /*
+ * If set to 1, The controller replaces the tunnel IP checksum field
+ * with hardware calculated IP checksum for the IP header of the packet
+ * associated with this descriptor. In case of VXLAN, the controller
+ * also replaces the outer header UDP checksum with hardware calculated
+ * UDP checksum for the packet associated with this descriptor.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * If set to 1, the device will treat this packet with LSO(Large Send
+ * Offload) processing for both normal or encapsulated packets, which is
+ * a form of TCP segmentation. When this bit is 1, the hdr_size and mss
+ * fields must be valid. The driver doesn't need to set t_ip_chksum,
+ * ip_chksum, and tcp_udp_chksum flags since the controller will replace
+ * the appropriate checksum fields for segmented packets. When this bit
+ * is 1, the hdr_size and mss fields must be valid.
+ */
+ #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+ /*
+ * If set to zero when LSO is '1', then the IPID will be treated as a
+ * 16b number and will be wrapped if it exceeds a value of 0xffff. If
+ * set to one when LSO is '1', then the IPID will be treated as a 15b
+	 * number and will be wrapped if it exceeds a value of 0x7fff.
+ */
+ #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
+ /*
+ * If set to zero when LSO is '1', then the IPID of the tunnel IP header
+ * will not be modified during LSO operations. If set to one when LSO is
+ * '1', then the IPID of the tunnel IP header will be incremented for
+ * each subsequent segment of an LSO operation.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
+ /*
+ * If set to '1', then the RoCE ICRC will be appended to the packet.
+ * Packet must be a valid RoCE format packet.
+ */
+ #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ /*
+ * If set to '1', then the FCoE CRC will be appended to the packet.
+ * Packet must be a valid FCoE format packet.
+ */
+ #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ uint16_t lflags;
+
+ /*
+ * When LSO is '1', this field must contain the offset of the TCP
+	 * payload from the beginning of the packet as 16-bit words. In case of
+	 * an encapsulated/tunneled packet, this field contains the offset of
+	 * the inner TCP payload from the beginning of the packet as 16-bit
+	 * words. This
+ * value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
+ #define TX_BD_LONG_HDR_SIZE_SFT 0
+ uint16_t hdr_size;
+
+ /*
+ * This is the MSS value that will be used to do the LSO processing. The
+ * value is the length in bytes of the TCP payload for each segment
+ * generated by the LSO operation. This value must be valid on the first
+ * BD of a packet.
+ */
+ #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
+ #define TX_BD_LONG_MSS_SFT 0
+ uint32_t mss;
+
+ uint16_t unused_2;
+
+ /*
+ * This value selects a CFA action to perform on the packet. Set this
+ * value to zero if no CFA action is desired. This value must be valid
+ * on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+
+ /*
+ * This value is action meta-data that defines CFA edit operations that
+ * are done in addition to any action editing.
+ */
+ /* When key=1, This is the VLAN tag VID value. */
+ #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
+ /* When key=1, This is the VLAN tag DE value. */
+ #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
+ /* When key=1, This is the VLAN tag PRI value. */
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
+ /* When key=1, This is the VLAN tag TPID select value. */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
+ /* 0x88a8 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+ /* 0x8100 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+ /* 0x9100 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+ /* 0x9200 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+ /* 0x9300 */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+ /* Value programmed in CFA VLANTPID register. */
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
+ TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
+ /* When key=1, This is the VLAN tag TPID select value. */
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
+ /*
+ * This field identifies the type of edit to be performed on the packet.
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+ #define TX_BD_LONG_CFA_META_KEY_SFT 28
+ /* No editing */
+ #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+ /*
+	 * meta[17:16] - TPID select value (0 = 0x8100).
+	 * meta[15:12] - PRI/DE value.
+	 * meta[11:0]  - VID value.
+ */
+ #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
+ #define TX_BD_LONG_CFA_META_KEY_LAST TX_BD_LONG_CFA_META_KEY_VLAN_TAG
+ uint32_t cfa_meta;
+} __attribute__((packed));
+
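+/*
+ * Example cfa_meta encoding (hypothetical VID 100, as composed in
+ * bnxt_start_xmit): TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
+ * TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 | 100
+ */
+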
+/* RX Producer Packet BD (16 bytes) */
+struct rx_prod_pkt_bd {
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_PKT_BD_TYPE_SFT 0
+ /*
+	 * Indicates that this BD is 16B long and is an RX Producer (i.e.
+ * empty) buffer descriptor.
+ */
+ #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT (UINT32_C(0x4) << 0)
+ /*
+ * If set to 1, the packet will be placed at the address plus 2B. The 2
+ * Bytes of padding will be written as zero.
+ */
+ /*
+ * This is intended to be used when the host buffer is cache-line
+ * aligned to produce packets that are easy to parse in host memory
+ * while still allowing writes to be cache line aligned.
+ */
+ #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
+ /*
+ * If set to 1, the packet write will be padded out to the nearest
+ * cache-line with zero value padding.
+ */
+ /*
+ * If receive buffers start/end on cache-line boundaries, this feature
+ * will ensure that all data writes on the PCI bus start/end on cache
+ * line boundaries.
+ */
+ #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+ /*
+ * This value is the number of additional buffers in the ring that
+	 * describe the buffer space to be consumed for this packet. If the
+ * value is zero, then the packet must fit within the space described by
+ * this BD. If this value is 1 or more, it indicates how many additional
+ * "buffer" BDs are in the ring immediately following this BD to be used
+ * for the same network packet. Even if the packet to be placed does not
+ * need all the additional buffers, they will be consumed anyway.
+ */
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
+ #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_PKT_BD_FLAGS_SFT 6
+ uint16_t flags_type;
+
+ /*
+ * This is the length in Bytes of the host physical buffer where data
+ * for the packet may be placed in host memory.
+ */
+ /*
+ * While this is a Byte resolution value, it is often advantageous to
+ * ensure that the buffers provided end on a host cache line.
+ */
+ uint16_t len;
+
+ /*
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive buffer set.
+ */
+ uint32_t opaque;
+
+ /*
+	 * This is the host physical address where data for the packet may be
+ * placed in host memory.
+ */
+ /*
+ * While this is a Byte resolution value, it is often advantageous to
+	 * ensure that the buffers provided start on a host cache line.
+ */
+ uint64_t addr;
+} __attribute__((packed));
+
+/* Completion Ring Structures */
+/* Note: This structure is used by the HWRM to communicate HWRM Error. */
+/* Base Completion Record (16 bytes) */
+struct cmpl_base {
+ /* unused is 10 b */
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records.
+ */
+ #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define CMPL_BASE_TYPE_SFT 0
+ /* TX L2 completion: Completion of TX packet. Length = 16B */
+ #define CMPL_BASE_TYPE_TX_L2 (UINT32_C(0x0) << 0)
+ /*
+	 * RX L2 completion: Completion of an L2 RX packet.
+ * Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_L2 (UINT32_C(0x11) << 0)
+ /*
+ * RX Aggregation Buffer completion : Completion of an L2
+ * aggregation buffer in support of TPA, HDS, or Jumbo packet
+ * completion. Length = 16B
+ */
+ #define CMPL_BASE_TYPE_RX_AGG (UINT32_C(0x12) << 0)
+ /*
+ * RX L2 TPA Start Completion: Completion at the beginning of a
+ * TPA operation. Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_TPA_START (UINT32_C(0x13) << 0)
+ /*
+ * RX L2 TPA End Completion: Completion at the end of a TPA
+ * operation. Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_TPA_END (UINT32_C(0x15) << 0)
+ /*
+ * Statistics Ejection Completion: Completion of statistics data
+ * ejection buffer. Length = 16B
+ */
+ #define CMPL_BASE_TYPE_STAT_EJECT (UINT32_C(0x1a) << 0)
+ /* HWRM Command Completion: Completion of an HWRM command. */
+ #define CMPL_BASE_TYPE_HWRM_DONE (UINT32_C(0x20) << 0)
+ /* Forwarded HWRM Request */
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ (UINT32_C(0x22) << 0)
+ /* Forwarded HWRM Response */
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP (UINT32_C(0x24) << 0)
+ /* HWRM Asynchronous Event Information */
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (UINT32_C(0x2e) << 0)
+ /* CQ Notification */
+ #define CMPL_BASE_TYPE_CQ_NOTIFICATION (UINT32_C(0x30) << 0)
+ /* SRQ Threshold Event */
+ #define CMPL_BASE_TYPE_SRQ_EVENT (UINT32_C(0x32) << 0)
+ /* DBQ Threshold Event */
+ #define CMPL_BASE_TYPE_DBQ_EVENT (UINT32_C(0x34) << 0)
+ /* QP Async Notification */
+ #define CMPL_BASE_TYPE_QP_EVENT (UINT32_C(0x38) << 0)
+ /* Function Async Notification */
+ #define CMPL_BASE_TYPE_FUNC_EVENT (UINT32_C(0x3a) << 0)
+ uint16_t type;
+
+ uint16_t info1;
+ uint32_t info2;
+
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ #define CMPL_BASE_V UINT32_C(0x1)
+ /* info3 is 31 b */
+ #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
+ #define CMPL_BASE_INFO3_SFT 1
+ uint32_t info3_v;
+
+ uint32_t info4;
+} __attribute__((packed));
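+
+/*
+ * Illustrative sketch (editor's addition): two common operations on a
+ * completion record. cmpl_valid() applies the pass/parity convention of
+ * the V bit described above; cmpl_len() applies the type-field convention
+ * that odd type values denote 32B records. Helper names are hypothetical.
+ */
+static inline int
+cmpl_valid(const struct cmpl_base *cmp, uint32_t expected_v)
+{
+	/* expected_v is CMPL_BASE_V on even passes and 0 on odd passes. */
+	return (cmp->info3_v & CMPL_BASE_V) == expected_v;
+}
+
+static inline unsigned int
+cmpl_len(const struct cmpl_base *cmp)
+{
+	/* The LSB of the type field encodes the record length. */
+	return ((cmp->type & CMPL_BASE_TYPE_MASK) & 1) ? 32 : 16;
+}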
+
+/* TX Completion Record (16 bytes) */
+struct tx_cmpl {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records.
+ */
+ #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define TX_CMPL_TYPE_SFT 0
+ /* TX L2 completion: Completion of TX packet. Length = 16B */
+ #define TX_CMPL_TYPE_TX_L2 (UINT32_C(0x0) << 0)
+ /*
+ * When this bit is '1', it indicates a packet that has an error of some
+ * type. Type of error is indicated in error_flags.
+ */
+ #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /*
+ * When this bit is '1', it indicates that the completed packet was
+ * transmitted using the push acceleration data provided by the driver.
+ * When this bit is '0', it indicates that the packet had no push
+ * acceleration data written, or that it was transmitted as a normal
+ * packet even though push data was provided.
+ */
+ #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
+ #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_CMPL_FLAGS_SFT 6
+ uint16_t flags_type;
+
+ uint16_t unused_0;
+
+ /*
+ * This is a copy of the opaque field from the first TX BD of this
+ * transmitted packet.
+ */
+ uint32_t opaque;
+
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ #define TX_CMPL_V UINT32_C(0x1)
+ /*
+ * This error indicates that there was some sort of problem with the BDs
+ * for the packet.
+ */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /* No error */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
+ /* Bad Format: BDs were not formatted correctly. */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT
+ /*
+ * When this bit is '1', it indicates that the length of the packet was
+ * zero. No packet was transmitted.
+ */
+ #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
+ /*
+ * When this bit is '1', it indicates that the packet was longer than
+ * the programmed limit in TDI. No packet was transmitted.
+ */
+ #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20)
+ /*
+ * When this bit is '1', it indicates that one or more of the BDs
+ * associated with this packet generated a PCI error. This probably
+ * means the address was not valid.
+ */
+ #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
+ /*
+ * When this bit is '1', it indicates that the packet was longer than
+ * indicated by the hint. No packet was transmitted.
+ */
+ #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
+ /*
+ * When this bit is '1', it indicates that the packet was dropped due to
+ * Poison TLP error on one or more of the TLPs in the PXP completion.
+ */
+ #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
+ #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define TX_CMPL_ERRORS_SFT 1
+ uint16_t errors_v;
+
+ uint16_t unused_1;
+ uint32_t unused_2;
+} __attribute__((packed));
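+
+/*
+ * Illustrative sketch (editor's addition): decoding a TX completion.
+ * Error code values such as TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT are
+ * pre-shifted, so they are compared after masking, without a shift.
+ * The helper name is hypothetical.
+ */
+static inline int
+tx_cmpl_bad_fmt(const struct tx_cmpl *txc)
+{
+	/* Error bits are not meaningful unless the error flag is set. */
+	if (!(txc->flags_type & TX_CMPL_FLAGS_ERROR))
+		return 0;
+	return (txc->errors_v & TX_CMPL_ERRORS_BUFFER_ERROR_MASK) ==
+	       TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT;
+}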
+
+/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
+struct rx_pkt_cmpl {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records.
+ */
+ #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PKT_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 completion: Completion of an L2 RX packet.
+ * Length = 32B
+ */
+ #define RX_PKT_CMPL_TYPE_RX_L2 (UINT32_C(0x11) << 0)
+ /*
+ * When this bit is '1', it indicates a packet that has an error of some
+ * type. Type of error is indicated in error_flags.
+ */
+ #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
+ /* Normal: Packet was placed using normal algorithm. */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
+ /* Jumbo: Packet was placed using jumbo algorithm. */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation: Packet was placed using Header/Data
+ * separation algorithm. The separation location is indicated by
+ * the itype field.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
+ /* This bit is '1' if the RSS field in this completion is valid. */
+ #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ /*
+ * This value indicates the packet type that was determined for the
+ * inner packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
+ /* Not Known: Indicates that the packet type was not known. */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 12)
+ /*
+ * IP Packet: Indicates that the packet was an IP packet, but
+ * further classification was not possible.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12)
+ /*
+ * TCP Packet: Indicates that the packet was IP and TCP. This
+ * indicates that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ /*
+ * UDP Packet: Indicates that the packet was IP and UDP. This
+ * indicates that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12)
+ /*
+ * FCoE Packet: Indicates that the packet was recognized as a
+ * FCoE. This also indicates that the payload_offset field is
+ * valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12)
+ /*
+ * RoCE Packet: Indicates that the packet was recognized as a
+ * RoCE. This also indicates that the payload_offset field is
+ * valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12)
+ /*
+ * ICMP Packet: Indicates that the packet was recognized as
+ * ICMP. This indicates that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12)
+ /*
+ * PtP packet wo/timestamp: Indicates that the packet was
+ * recognized as a PtP packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \
+ (UINT32_C(0x8) << 12)
+ /*
+ * PtP packet w/timestamp: Indicates that the packet was
+ * recognized as a PtP packet and that a timestamp was taken for
+ * the packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PKT_CMPL_FLAGS_SFT 6
+ uint16_t flags_type;
+
+ /*
+ * This is the length of the data for the packet stored in the buffer(s)
+ * identified by the opaque value. This includes the packet BD and any
+ * associated buffer BDs. This does not include the length of any
+ * data placed in aggregation BDs.
+ */
+ uint16_t len;
+
+ /*
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
+ */
+ uint32_t opaque;
+
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ #define RX_PKT_CMPL_V1 UINT32_C(0x1)
+ /*
+ * This value is the number of aggregation buffers that follow this
+ * entry in the completion ring that are a part of this packet. If the
+ * value is zero, then the packet is completely contained in the buffer
+ * space provided for the packet in the RX ring.
+ */
+ #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
+ #define RX_PKT_CMPL_AGG_BUFS_SFT 1
+ uint8_t agg_bufs_v1;
+
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}.
+ */
+ uint8_t rss_hash_type;
+
+ /*
+ * This value indicates the offset from the beginning of the packet
+ * where the inner payload starts. This value is valid for TCP, UDP,
+ * FCoE, and RoCE packets.
+ */
+ uint8_t payload_offset;
+
+ uint8_t unused_1;
+
+ /*
+ * This value is the RSS hash value calculated for the packet based on
+ * the mode bits and key value in the VNIC.
+ */
+ uint32_t rss_hash;
+} __attribute__((packed));
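+
+/*
+ * Illustrative sketch (editor's addition): extracting the aggregation
+ * buffer count with its mask/shift pair, and testing the pre-shifted
+ * itype value. Helper names are hypothetical.
+ */
+static inline unsigned int
+rx_cmpl_agg_bufs(const struct rx_pkt_cmpl *rxc)
+{
+	/* Number of aggregation completions that follow this entry. */
+	return (rxc->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
+	       RX_PKT_CMPL_AGG_BUFS_SFT;
+}
+
+static inline int
+rx_cmpl_is_tcp(const struct rx_pkt_cmpl *rxc)
+{
+	/* For TCP packets the payload_offset field is also valid. */
+	return (rxc->flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) ==
+	       RX_PKT_CMPL_FLAGS_ITYPE_TCP;
+}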
+
+/* last 16 bytes of RX Packet Completion Record */
+struct rx_pkt_cmpl_hi {
+ /*
+ * This indicates that the ip checksum was calculated for the inner
+ * packet and that the ip_cs_error field indicates if there was an
+ * error.
+ */
+ #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ /*
+ * This indicates that the TCP, UDP or ICMP checksum was calculated for
+ * the inner packet and that the l4_cs_error field indicates if there
+ * was an error.
+ */
+ #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ /*
+ * This indicates that the ip checksum was calculated for the tunnel
+ * header and that the t_ip_cs_error field indicates if there was an
+ * error.
+ */
+ #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ /*
+ * This indicates that the UDP checksum was calculated for the tunnel
+ * packet and that the t_l4_cs_error field indicates if there was an
+ * error.
+ */
+ #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ /* This value indicates what format the metadata field is. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Value is zero. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value. -
+ * metadata[11:0] contains the vlan VID value. - metadata[12]
+ * contains the vlan DE value. - metadata[15:13] contains the
+ * vlan PRI value. - metadata[31:16] contains the vlan TPID
+ * value.
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
+ RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
+ /*
+ * This field indicates the IP type for the inner-most IP header. A
+ * value of '0' indicates IPv4. A value of '1' indicates IPv6. This
+ * value is only valid if itype indicates a packet with an IP header.
+ */
+ #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+ uint32_t flags2;
+
+ /*
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
+ */
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_PKT_CMPL_METADATA_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_PKT_CMPL_METADATA_PRI_SFT 13
+ /* When meta_format=1, this value is the VLAN TPID. */
+ #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_PKT_CMPL_METADATA_TPID_SFT 16
+ uint32_t metadata;
+
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ #define RX_PKT_CMPL_V2 UINT32_C(0x1)
+ /*
+ * This error indicates that there was some sort of problem with the BDs
+ * for the packet that was found after part of the packet was already
+ * placed. The packet should be treated as invalid.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /* No buffer error */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Did Not Fit: Packet did not fit into packet buffer provided.
+ * For regular placement, this means the packet did not fit in
+ * the buffer provided. For HDS and jumbo placement, this means
+ * that the packet could not be placed into 7 physical buffers
+ * or less.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Not On Chip: All BDs needed for the packet were not on-chip
+ * when the packet arrived.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+ (UINT32_C(0x2) << 1)
+ /* Bad Format: BDs were not formatted correctly. */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \
+ (UINT32_C(0x3) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
+ /* This indicates that there was an error in the IP header checksum. */
+ #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
+ /*
+ * This indicates that there was an error in the TCP, UDP or ICMP
+ * checksum.
+ */
+ #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20)
+ /*
+ * This indicates that there was an error in the tunnel IP header
+ * checksum.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR UINT32_C(0x40)
+ /* This indicates that there was an error in the tunnel UDP checksum. */
+ #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
+ /*
+ * This indicates that there was a CRC error on either an FCoE or RoCE
+ * packet. The itype indicates the packet type.
+ */
+ #define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100)
+ /*
+ * This indicates that there was an error in the tunnel portion of the
+ * packet when this field is non-zero.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
+ /*
+ * No additional error occurred on the tunnel portion of the
+ * packet, or the packet does not have a tunnel.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9)
+ /*
+ * Indicates that IP header version does not match expectation
+ * from L2 Ethertype for IPv4 and IPv6 in the tunnel header.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \
+ (UINT32_C(0x1) << 9)
+ /*
+ * Indicates that header length is out of range in the tunnel
+ * header. Valid for IPv4.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \
+ (UINT32_C(0x2) << 9)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the PPPoE header length for a tunnel PPPoE packet.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \
+ (UINT32_C(0x3) << 9)
+ /*
+ * Indicates that the physical packet is shorter than that claimed
+ * by the tunnel L3 header length. Valid for IPv4 or IPv6
+ * tunnel packets.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \
+ (UINT32_C(0x4) << 9)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the tunnel UDP header length for a tunnel UDP
+ * packet that is not fragmented.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \
+ (UINT32_C(0x5) << 9)
+ /*
+ * Indicates that the IPv4 TTL or IPv6 hop limit check has
+ * failed (e.g. TTL = 0) in the tunnel header. Valid for IPv4
+ * and IPv6.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \
+ (UINT32_C(0x6) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+ /*
+ * This indicates that there was an error in the inner portion of the
+ * packet when this field is non-zero.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
+ /*
+ * No additional error occurred on the inner portion of the
+ * packet.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12)
+ /*
+ * Indicates that IP header version does not match expectation
+ * from L2 Ethertype for IPv4 and IPv6 or that option other than
+ * VFT was parsed on FCoE packet.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \
+ (UINT32_C(0x1) << 12)
+ /*
+ * Indicates that the header length is out of range. Valid for
+ * IPv4 and RoCE.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \
+ (UINT32_C(0x2) << 12)
+ /*
+ * Indicates that the IPv4 TTL or IPv6 hop limit check has
+ * failed (e.g. TTL = 0). Valid for IPv4 and IPv6.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
+ /*
+ * Indicates that the physical packet is shorter than that claimed
+ * by the L3 header length. Valid for IPv4, IPv6, or RoCE
+ * packets.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \
+ (UINT32_C(0x4) << 12)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the UDP header length for a UDP packet that is not
+ * fragmented.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \
+ (UINT32_C(0x5) << 12)
+ /*
+ * Indicates that TCP header length > IP payload. Valid for TCP
+ * packets only.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \
+ (UINT32_C(0x6) << 12)
+ /* Indicates that TCP header length < 5. Valid for TCP. */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
+ (UINT32_C(0x7) << 12)
+ /*
+ * Indicates that TCP option headers result in a TCP header size
+ * that does not match data offset in TCP header. Valid for TCP.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
+ (UINT32_C(0x8) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define RX_PKT_CMPL_ERRORS_SFT 1
+ uint16_t errors_v2;
+
+ /*
+ * This field identifies the CFA action rule that was used for this
+ * packet.
+ */
+ uint16_t cfa_code;
+
+ /*
+ * This value holds the reordering sequence number for the packet. If
+ * the reordering sequence is not valid, then this value is zero. The
+ * reordering domain for the packet is in the bottom 8 to 10b of the
+ * rss_hash value. The bottom 20b of this value contain the ordering
+ * domain value for the packet.
+ */
+ #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
+ #define RX_PKT_CMPL_REORDER_SFT 0
+ uint32_t reorder;
+} __attribute__((packed));
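+
+/*
+ * Illustrative sketch (editor's addition): recovering the VLAN tag from
+ * the metadata field, which is only meaningful when meta_format indicates
+ * VLAN. The helper name is hypothetical.
+ */
+static inline int
+rx_cmpl_vlan_tag(const struct rx_pkt_cmpl_hi *rxch,
+		 uint16_t *vid, uint16_t *tpid)
+{
+	if ((rxch->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) !=
+	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
+		return 0;	/* no VLAN metadata present */
+	*vid = rxch->metadata & RX_PKT_CMPL_METADATA_VID_MASK;
+	*tpid = (rxch->metadata & RX_PKT_CMPL_METADATA_TPID_MASK) >>
+		RX_PKT_CMPL_METADATA_TPID_SFT;
+	return 1;
+}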
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+ /* Length of forwarded request in bytes. */
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records.
+ */
+ #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
+ /* Forwarded HWRM Request */
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (UINT32_C(0x22) << 0)
+ /* Length of forwarded request in bytes. */
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ uint16_t req_len_type;
+
+ /*
+ * Source ID of this request. Typically used in forwarding requests
+ * and responses.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t source_id;
+
+ uint32_t unused_0;
+
+ /* Address of forwarded request. */
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1)
+ /* Address of forwarded request. */
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+ uint64_t req_buf_addr_v;
+} __attribute__((packed));
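+
+/*
+ * Illustrative sketch (editor's addition): bit 0 of req_buf_addr_v is the
+ * pass/valid bit, so the address of the forwarded request is recovered by
+ * clearing it. The helper name is hypothetical.
+ */
+static inline uint64_t
+fwd_req_buf_addr(const struct hwrm_fwd_req_cmpl *fwd)
+{
+	/* Clear the V bit; the remaining bits carry the host address. */
+	return fwd->req_buf_addr_v & ~(uint64_t)HWRM_FWD_REQ_CMPL_V;
+}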
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+ /*
+ * This field indicates the exact type of the completion. By convention,
+ * the LSB identifies the length of the record in 16B units. Even values
+ * indicate 16B records. Odd values indicate 32B records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT \
+ (UINT32_C(0x2e) << 0)
+ uint16_t type;
+
+ /* Identifiers of events. */
+ /* Link status changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ (UINT32_C(0x0) << 0)
+ /* Link MTU changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \
+ (UINT32_C(0x1) << 0)
+ /* Link speed changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \
+ (UINT32_C(0x2) << 0)
+ /* DCB Configuration changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \
+ (UINT32_C(0x3) << 0)
+ /* Port connection not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ (UINT32_C(0x4) << 0)
+ /* Link speed configuration was not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
+ (UINT32_C(0x5) << 0)
+ /* Function driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \
+ (UINT32_C(0x10) << 0)
+ /* Function driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \
+ (UINT32_C(0x11) << 0)
+ /* PF driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ (UINT32_C(0x20) << 0)
+ /* PF driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \
+ (UINT32_C(0x21) << 0)
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (UINT32_C(0x30) << 0)
+ /* VF MAC Address Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \
+ (UINT32_C(0x31) << 0)
+ /* PF-VF communication channel status change. */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
+ (UINT32_C(0x32) << 0)
+ /* HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \
+ (UINT32_C(0xff) << 0)
+ uint16_t event_id;
+
+ /* Event specific data */
+ uint32_t event_data2;
+
+ /* opaque is 7 b */
+ /*
+ * This value is written by the NIC such that it will be different for
+ * each pass through the completion queue. The even passes will write 1.
+ * The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ uint8_t opaque_v;
+
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+
+ /* Event specific data */
+ uint32_t event_data1;
+} __attribute__((packed));
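+
+/*
+ * Illustrative sketch (editor's addition): reassembling the power-on-reset
+ * timestamp, assuming timestamp_hi carries the 16 bits immediately above
+ * the 8 bits in timestamp_lo. The helper name is hypothetical.
+ */
+static inline uint32_t
+async_event_timestamp(const struct hwrm_async_event_cmpl *ae)
+{
+	/* 24-bit timestamp in 100-msec units since power-on reset. */
+	return ((uint32_t)ae->timestamp_hi << 8) | ae->timestamp_lo;
+}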
+
+/*
+ * Note: The Hardware Resource Manager (HWRM) manages various hardware resources
+ * inside the chip. The HWRM is implemented in firmware, and runs on embedded
+ * processors inside the chip. This firmware is a vital part of the chip's
+ * operation; the chip cannot be used by a driver without it.
+ */
+
+/* Input (16 bytes) */
+struct input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* Output (8 bytes) */
+struct output {
+ /*
+ * Pass/Fail or error type. Note: The receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+} __attribute__((packed));
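+
+/*
+ * Illustrative sketch (editor's addition): because the HWRM writes the
+ * valid byte of a response last, a caller that knows the expected response
+ * length can poll that byte to detect completion. The helper name and the
+ * bare busy-wait loop are illustrative only.
+ */
+static inline void
+hwrm_wait_resp_valid(const volatile uint8_t *resp, uint16_t resp_len)
+{
+	/* Spin until the last byte of the response reads '1'. */
+	while (resp[resp_len - 1] != 1)
+		;
+}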
+
+/* hwrm_cfa_l2_filter_alloc */
+/*
+ * A filter is used to identify traffic that contains a matching set of
+ * parameters like unicast or broadcast MAC address or a VLAN tag amongst
+ * other things which then allows the ASIC to direct the incoming traffic
+ * to an appropriate VNIC or Rx ring.
+ */
+
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * Enumeration denoting the RX, TX type of the resource. This
+ * enumeration is used for resources that are similar for both TX and RX
+ * paths of the chip.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
+ (UINT32_C(0x0) << 0)
+ /* rx path */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
+ (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
+ /*
+ * Setting of this flag indicates the applicability to the loopback
+ * path.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered an accept action.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \
+ UINT32_C(0x4)
+ /*
+ * If this flag is set, all t_l2_* fields are invalid and they should
+ * not be specified. If this flag is set, then l2_* fields refer to
+ * fields of outermost L2 header.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \
+ UINT32_C(0x8)
+ uint32_t flags;
+
+ /* This bit must be '1' for the l2_addr field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \
+ UINT32_C(0x1)
+ /* This bit must be '1' for the l2_addr_mask field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
+ UINT32_C(0x2)
+ /* This bit must be '1' for the l2_ovlan field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \
+ UINT32_C(0x4)
+ /* This bit must be '1' for the l2_ovlan_mask field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
+ UINT32_C(0x8)
+ /* This bit must be '1' for the l2_ivlan field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \
+ UINT32_C(0x10)
+ /* This bit must be '1' for the l2_ivlan_mask field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
+ UINT32_C(0x20)
+ /* This bit must be '1' for the t_l2_addr field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the t_l2_addr_mask field to be configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
+ UINT32_C(0x80)
+ /* This bit must be '1' for the t_l2_ovlan field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the t_l2_ovlan_mask field to be configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
+ UINT32_C(0x200)
+ /* This bit must be '1' for the t_l2_ivlan field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the t_l2_ivlan_mask field to be configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
+ UINT32_C(0x800)
+ /* This bit must be '1' for the src_type field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \
+ UINT32_C(0x1000)
+ /* This bit must be '1' for the src_id field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \
+ UINT32_C(0x2000)
+ /* This bit must be '1' for the tunnel_type field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x4000)
+ /* This bit must be '1' for the dst_id field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x10000)
+ uint32_t enables;
+
+ /*
+ * This value sets the match value for the L2 MAC address. Destination
+ * MAC address for RX path. Source MAC address for TX path.
+ */
+ uint8_t l2_addr[6];
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+
+ /*
+ * This value sets the mask value for the L2 address. A value of 0 will
+ * mask the corresponding bit from compare.
+ */
+ uint8_t l2_addr_mask[6];
+
+ /* This value sets VLAN ID value for outer VLAN. */
+ uint16_t l2_ovlan;
+
+ /*
+ * This value sets the mask value for the ovlan id. A value of 0 will
+ * mask the corresponding bit from compare.
+ */
+ uint16_t l2_ovlan_mask;
+
+ /* This value sets VLAN ID value for inner VLAN. */
+ uint16_t l2_ivlan;
+
+ /*
+ * This value sets the mask value for the ivlan id. A value of 0 will
+ * mask the corresponding bit from compare.
+ */
+ uint16_t l2_ivlan_mask;
+
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This value sets the match value for the tunnel L2 MAC address.
+ * Destination MAC address for RX path. Source MAC address for TX path.
+ */
+ uint8_t t_l2_addr[6];
+
+ uint8_t unused_4;
+ uint8_t unused_5;
+
+ /*
+ * This value sets the mask value for the tunnel L2 address. A value of
+ * 0 will mask the corresponding bit from compare.
+ */
+ uint8_t t_l2_addr_mask[6];
+
+ /* This value sets VLAN ID value for tunnel outer VLAN. */
+ uint16_t t_l2_ovlan;
+
+ /*
+ * This value sets the mask value for the tunnel ovlan id. A value of 0
+ * will mask the corresponding bit from compare.
+ */
+ uint16_t t_l2_ovlan_mask;
+
+ /* This value sets VLAN ID value for tunnel inner VLAN. */
+ uint16_t t_l2_ivlan;
+
+ /*
+ * This value sets the mask value for the tunnel ivlan id. A value of 0
+ * will mask the corresponding bit from compare.
+ */
+ uint16_t t_l2_ivlan_mask;
+
+ /* This value identifies the type of source of the packet. */
+ /* Network port */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT \
+ (UINT32_C(0x0) << 0)
+ /* Physical function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF \
+ (UINT32_C(0x1) << 0)
+ /* Virtual function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF \
+ (UINT32_C(0x2) << 0)
+ /* Virtual NIC of a function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC \
+ (UINT32_C(0x3) << 0)
+ /* Embedded processor for CFA management */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG \
+ (UINT32_C(0x4) << 0)
+ /* Embedded processor for OOB management */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE \
+ (UINT32_C(0x5) << 0)
+ /* Embedded processor for RoCE */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO \
+ (UINT32_C(0x6) << 0)
+ /* Embedded processor for network proxy functions */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG \
+ (UINT32_C(0x7) << 0)
+ uint8_t src_type;
+
+ uint8_t unused_6;
+ /*
+ * This value is the id of the source. For a network port, it represents
+ * port_id. For a physical function, it represents fid. For a virtual
+ * function, it represents vf_id. For a vnic, it represents vnic_id. For
+ * embedded processors, this id is not valid. Notes: 1. The function ID
+ * is implied if src_id is not provided for a src_type that is either
+ */
+ uint32_t src_id;
+
+ /* Tunnel Type. */
+ /* Non-tunnel */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ (UINT32_C(0x0) << 0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Network Virtualization Generic Routing Encapsulation (NVGRE)
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ (UINT32_C(0x2) << 0)
+ /*
+ * Generic Routing Encapsulation (GRE) inside Ethernet payload
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ (UINT32_C(0x3) << 0)
+ /* IP in IP */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ (UINT32_C(0x4) << 0)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ (UINT32_C(0x5) << 0)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ (UINT32_C(0x6) << 0)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ (UINT32_C(0x7) << 0)
+ /*
+ * Generic Routing Encapsulation (GRE) inside IP datagram
+ * payload
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ (UINT32_C(0x8) << 0)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ (UINT32_C(0xff) << 0)
+ uint8_t tunnel_type;
+
+ uint8_t unused_7;
+
+ /*
+ * If set, this value shall represent the Logical VNIC ID of the
+ * destination VNIC for the RX path and network port id of the
+ * destination port for the TX path.
+ */
+ uint16_t dst_id;
+
+ /* Logical VNIC ID of the VNIC where traffic is mirrored. */
+ uint16_t mirror_vnic_id;
+
+ /*
+ * This hint is provided to help in placing the filter in the filter
+ * table.
+ */
+ /* No preference */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
+ (UINT32_C(0x0) << 0)
+ /* Above the given filter */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \
+ (UINT32_C(0x1) << 0)
+ /* Below the given filter */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
+ (UINT32_C(0x2) << 0)
+ /* As high as possible */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \
+ (UINT32_C(0x3) << 0)
+ /* As low as possible */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \
+ (UINT32_C(0x4) << 0)
+ uint8_t pri_hint;
+
+ uint8_t unused_8;
+ uint32_t unused_9;
+
+ /*
+ * This is the ID of the filter that goes along with the pri_hint. This
+ * field is valid only for the following pri_hint values:
+ * 1 - Above the given filter
+ * 2 - Below the given filter
+ */
+ uint64_t l2_filter_id_hint;
+} __attribute__((packed));
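+
+/*
+ * Illustrative sketch (editor's addition): requesting an RX-path unicast
+ * filter that matches a full destination MAC and steers to a VNIC. The
+ * helper name is hypothetical, <string.h> is assumed to be available, and
+ * little-endian conversion of the fields is omitted for brevity.
+ */
+static inline void
+l2_filter_req_fill(struct hwrm_cfa_l2_filter_alloc_input *req,
+		   const uint8_t mac[6], uint16_t dst_vnic_id)
+{
+	req->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+	/* Each configured field must have its enables bit set. */
+	req->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK |
+		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
+	memcpy(req->l2_addr, mac, 6);
+	memset(req->l2_addr_mask, 0xff, 6);	/* compare all address bits */
+	req->dst_id = dst_vnic_id;
+}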
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ /*
+ * Pass/Fail or error type. Note: The receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+
+ /*
+ * This is the ID of the flow associated with this filter. This value
+ * shall be used to match and associate the flow identifier returned in
+ * completion records. A value of 0xFFFFFFFF shall indicate no flow id.
+ */
+ uint32_t flow_id;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_filter_free */
+/*
+ * Description: Free a L2 filter. The HWRM shall free all associated filter
+ * resources with the L2 filter.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+ /*
+ * Pass/Fail or error type. Note: The receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* Description: This command will set rx mask of the function. */
+
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* VNIC ID */
+ uint32_t vnic_id;
+
+ /* Reserved for future use. */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is requested to accept multi-cast
+ * packets specified by the multicast addr table.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2)
+ /*
+ * When this bit is '1', the function is requested to accept all multi-
+ * cast packets.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4)
+ /*
+ * When this bit is '1', the function is requested to accept broadcast
+ * packets.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8)
+ /*
+ * When this bit is '1', the function is requested to be put in the
+ * promiscuous mode. The HWRM should accept any function to set up
+ * promiscuous mode. The HWRM shall follow the semantics below for the
+ * promiscuous mode support. # When partitioning is not enabled on a
+ * port (i.e. single PF on the port), then the PF shall be allowed to be
+ * in the promiscuous mode. When the PF is in the promiscuous mode, then
+ * it shall receive all host bound traffic on that port. # When
+ * partitioning is enabled on a port (i.e. multiple PFs per port) and a
+ * PF on that port is in the promiscuous mode, then the PF receives all
+ * traffic within that partition as identified by a unique identifier
+ * for the PF (e.g. S-Tag). If a unique outer VLAN for the PF is
+ * specified, then the setting of promiscuous mode on that PF shall
+ * result in the PF receiving all host bound traffic with matching outer
+ * VLAN. # A VF may be set in the promiscuous mode. In the
+ * promiscuous mode, the VF does not receive any traffic unless a unique
+ * outer VLAN for the VF is specified. If a unique outer VLAN for the VF
+ * is specified, then the setting of promiscuous mode on that VF shall
+ * result in the VF receiving all host bound traffic with the matching
+ * outer VLAN. # The HWRM shall allow the setting of promiscuous mode on
+ * a function independently from the promiscuous mode settings on other
+ * functions.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
+ /*
+ * If this flag is set, the corresponding RX filters shall be set up to
+ * cover multicast/broadcast filters for the outermost Layer 2
+ * destination MAC address field.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20)
+ uint32_t mask;
+
+ /* This is the address for mcast address tbl. */
+ uint64_t mc_tbl_addr;
+
+ /*
+ * This value indicates how many entries in mc_tbl are valid. Each entry
+ * is 6 bytes.
+ */
+ uint32_t num_mc_entries;
+
+ uint32_t unused_0;
+} __attribute__((packed));
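+
+/*
+ * Illustrative sketch (editor's addition): composing the RX mask for a
+ * VNIC that always accepts broadcast and optionally enters promiscuous
+ * mode. The helper name is hypothetical; endianness conversion is omitted.
+ */
+static inline void
+rx_mask_req_fill(struct hwrm_cfa_l2_set_rx_mask_input *req,
+		 uint32_t vnic_id, int promiscuous)
+{
+	req->vnic_id = vnic_id;
+	req->mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
+	if (promiscuous)
+		req->mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+	req->num_mc_entries = 0;	/* no multicast address table */
+}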
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ /*
+ * Pass/Fail or error type. Note: The receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_exec_fwd_resp */
+/*
+ * Description: This command is used to send an encapsulated request to the
+ * HWRM. This command instructs the HWRM to execute the request and forward the
+ * response of the encapsulated request to the location specified in the
+ * original request that is encapsulated. The target id of this command shall be
+ * set to 0xFFFF (HWRM). The response location in this command shall be used to
+ * acknowledge the receipt of the encapsulated request and forwarding of the
+ * response.
+ */
+
+/* Input (128 bytes) */
+struct hwrm_exec_fwd_resp_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * This is an encapsulated request. This request should be executed by
+ * the HWRM and the response should be provided in the response buffer
+ * inside the encapsulated request.
+ */
+ uint32_t encap_request[26];
+
+ /*
+ * This value indicates the target ID of the response to the
+ * encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+ /*
+ * Pass/Fail or error type. Note: The receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_func_qcaps */
+/*
+ * Description: This command returns capabilities of a function. The input FID
+ * value is used to indicate what function is being queried. This allows a
+ * physical function driver to query virtual functions that are children of the
+ * physical function. The output FID value is needed to configure Rings and
+ * MSI-X vectors so their DMA operations appear correctly on the PCI bus.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * Function ID of the function that is being queried. 0xFF... (All Fs)
+ * if the query is for the requesting function.
+ */
+ uint16_t fid;
+
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+ /*
+ * Pass/Fail or error type. Note: The receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /*
+ * FID value. This value is used to identify operations on the PCI bus
+ * as belonging to a particular PCI function.
+ */
+ uint16_t fid;
+
+ /*
+ * Port ID of port that this function is associated with. Valid only for
+ * the PF. 0xFF... (All Fs) if this function is not associated with any
+ * port. 0xFF... (All Fs) if this function is called from a VF.
+ */
+ uint16_t port_id;
+
+ /* If 1, then Push mode is supported on this function. */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
+ /*
+ * If 1, then the global MSI-X auto-masking is enabled for the device.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
+ UINT32_C(0x2)
+ /*
+ * If 1, then the Precision Time Protocol (PTP) processing is supported
+ * on this function. The HWRM should enable PTP on only a single
+ * Physical Function (PF) per port.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4)
+ uint32_t flags;
+
+ /*
+ * This value is the current MAC address configured for this function. A
+ * value of 00-00-00-00-00-00 indicates no MAC address is currently
+ * configured.
+ */
+ uint8_t perm_mac_address[6];
+
+ /*
+ * The maximum number of RSS/COS contexts that can be allocated to the
+ * function.
+ */
+ uint16_t max_rsscos_ctx;
+
+ /*
+ * The maximum number of completion rings that can be allocated to the
+ * function.
+ */
+ uint16_t max_cmpl_rings;
+
+ /*
+ * The maximum number of transmit rings that can be allocated to the
+ * function.
+ */
+ uint16_t max_tx_rings;
+
+ /*
+ * The maximum number of receive rings that can be allocated to the
+ * function.
+ */
+ uint16_t max_rx_rings;
+
+ /*
+ * The maximum number of L2 contexts that can be allocated to the
+ * function.
+ */
+ uint16_t max_l2_ctxs;
+
+ /* The maximum number of VNICs that can be allocated to the function. */
+ uint16_t max_vnics;
+
+ /*
+ * The identifier for the first VF enabled on a PF. This is valid only
+ * on the PF with SR-IOV enabled. 0xFF... (All Fs) if this command is
+ * called on a PF with SR-IOV disabled or on a VF.
+ */
+ uint16_t first_vf_id;
+
+ /*
+ * The maximum number of VFs that can be allocated to the function. This
+ * is valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if this
+ * command is called on a PF with SR-IOV disabled or on a VF.
+ */
+ uint16_t max_vfs;
+
+ /*
+ * The maximum number of statistic contexts that can be allocated to the
+ * function.
+ */
+ uint16_t max_stat_ctx;
+
+ /*
+ * The maximum number of Encapsulation records that can be offloaded by
+ * this function.
+ */
+ uint32_t max_encap_records;
+
+ /*
+ * The maximum number of decapsulation records that can be offloaded by
+ * this function.
+ */
+ uint32_t max_decap_records;
+
+ /*
+ * The maximum number of Exact Match (EM) flows that can be offloaded by
+ * this function on the TX side.
+ */
+ uint32_t max_tx_em_flows;
+
+ /*
+ * The maximum number of Wildcard Match (WM) flows that can be offloaded
+ * by this function on the TX side.
+ */
+ uint32_t max_tx_wm_flows;
+
+ /*
+ * The maximum number of Exact Match (EM) flows that can be offloaded by
+ * this function on the RX side.
+ */
+ uint32_t max_rx_em_flows;
+
+ /*
+ * The maximum number of Wildcard Match (WM) flows that can be offloaded
+ * by this function on the RX side.
+ */
+ uint32_t max_rx_wm_flows;
+
+ /*
+ * The maximum number of multicast filters that can be supported by this
+ * function on the RX side.
+ */
+ uint32_t max_mcast_filters;
+
+ /*
+ * The maximum value of flow_id that can be supported in completion
+ * records.
+ */
+ uint32_t max_flow_id;
+
+ /*
+ * The maximum number of HW ring groups that can be supported on this
+ * function.
+ */
+ uint32_t max_hw_ring_grps;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
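+
+/*
+ * Illustrative sketch (editor's addition): a driver sizing its queues from
+ * the queried capabilities. A usable queue pair needs a TX ring, an RX
+ * ring and a completion ring, so the minimum of the three limits governs.
+ * The helper name is hypothetical.
+ */
+static inline uint16_t
+func_max_queue_pairs(const struct hwrm_func_qcaps_output *resp)
+{
+	uint16_t n = resp->max_tx_rings;
+
+	if (resp->max_rx_rings < n)
+		n = resp->max_rx_rings;
+	if (resp->max_cmpl_rings < n)
+		n = resp->max_cmpl_rings;
+	return n;
+}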
+
+/* hwrm_func_reset */
+/*
+ * Description: This command resets a hardware function (PCIe function) and
+ * frees any resources used by the function. This command shall be initiated by
+ * the driver after an FLR has occurred to prepare the function for re-use. This
+ * command may also be initiated by a driver prior to doing its own
+ * configuration. This command puts the function into the reset state. In the
+ * reset state, global and port related features of the chip are not available.
+ */
+/*
+ * Note: This command will reset a function that has already been disabled or
+ * idled. The command returns all the resources owned by the function so a new
+ * driver may allocate and configure resources normally.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command.
+ * 0x0 - 0xFFF8 - Used for function IDs
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* This bit must be '1' for the vf_id_valid field to be configured. */
+ #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID \
+ UINT32_C(0x1)
+ uint32_t enables;
+
+ /*
+ * The ID of the VF that this PF is trying to reset. Only the parent PF
+ * shall be allowed to reset a child VF. A parent PF driver shall use
+ * this field only when a specific child VF is requested to be reset.
+ */
+ uint16_t vf_id;
+
+ /* This value indicates the level of a function reset. */
+ /*
+ * Reset the caller function and its children VFs (if any). If
+ * no children functions exist, then reset the caller function
+ * only.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \
+ (UINT32_C(0x0) << 0)
+ /* Reset the caller function only */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Reset all children VFs of the caller function driver if the
+ * caller is a PF driver. It is an error to specify this level
+ * by a VF driver. It is an error to specify this level by a PF
+ * driver with no children VFs.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
+ (UINT32_C(0x2) << 0)
+ /*
+ * Reset a specific VF of the caller function driver if the
+ * caller is the parent PF driver. It is an error to specify
+ * this level by a VF driver. It is an error to specify this
+ * level by a PF driver that is not the parent of the VF that is
+ * being requested to reset.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \
+ (UINT32_C(0x3) << 0)
+ uint8_t func_reset_level;
+
+ uint8_t unused_0;
+} __attribute__((packed));
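+/*
+ * A minimal sketch of populating a RESETME request using the struct above.
+ * HWRM_FUNC_RESET is assumed to be the req_type constant defined earlier in
+ * this header; rte_cpu_to_le_*() come from rte_byteorder.h and memset()
+ * from <string.h>. seq_id assignment is left to the caller's sequencing.
+ */
+static inline void
+hwrm_func_reset_prep(struct hwrm_func_reset_input *req, uint64_t resp_dma)
+{
+ memset(req, 0, sizeof(*req));
+ req->req_type = rte_cpu_to_le_16(HWRM_FUNC_RESET);
+ /* -1: no completion-ring completion is generated for this request. */
+ req->cmpl_ring = rte_cpu_to_le_16((uint16_t)-1);
+ /* resp_dma must point to a 16B-aligned, zeroed response area. */
+ req->resp_addr = rte_cpu_to_le_64(resp_dma);
+ /* Reset the caller function only; vf_id is left unconfigured. */
+ req->func_reset_level = HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME;
+}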
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver is to verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
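+/*
+ * A minimal sketch of validating the response above once its 'valid' byte
+ * reads 1: match seq_id to the request and treat a zero error_code as
+ * success, per the Pass/Fail note. rte_le_to_cpu_16() is from
+ * rte_byteorder.h.
+ */
+static inline int
+hwrm_func_reset_check(const struct hwrm_func_reset_output *resp,
+   uint16_t expected_seq_id)
+{
+ /* The response must echo the sequence number of the request. */
+ if (rte_le_to_cpu_16(resp->seq_id) != expected_seq_id)
+  return -1;
+ /* A non-zero error_code indicates Fail or an error type. */
+ if (rte_le_to_cpu_16(resp->error_code) != 0)
+  return -1;
+ return 0;
+}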
+
+/* hwrm_port_phy_cfg */
+/*
+ * Description: This command configures the PHY device for the port. It allows
+ * setting of the most generic settings for the PHY. The HWRM shall complete
+ * this command as soon as PHY settings are configured. They may not be applied
+ * when the command response is provided. A VF driver shall not be allowed to
+ * configure PHY using this command. In a network partition mode, a PF driver
+ * shall not be allowed to configure PHY using this command.
+ */
+
+/* Input (56 bytes) */
+struct hwrm_port_phy_cfg_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function IDs;
+ * 0xFFF8 - 0xFFFE: reserved for internal processors; 0xFFFF: HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * When this bit is set to '1', the PHY for the port shall be reset. #
+ * If this bit is set to 1, then the HWRM shall reset the PHY after
+ * applying PHY configuration changes specified in this command. # In
+ * order to guarantee that PHY configuration changes specified in this
+ * command take effect, the HWRM client should set this flag to 1. # If
+ * this bit is not set to 1, then the HWRM may reset the PHY depending
+ * on the current PHY configuration and settings specified in this
+ * command.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the link shall be forced to be taken
+ * down. # When this bit is set to '1', all other command input settings
+ * related to the link speed shall be ignored. Once the link state is
+ * forced down, it can be explicitly cleared from that state by setting
+ * this flag to '0'. # If this flag is set to '0', then the link shall
+ * be cleared from forced down state if the link is in forced down
+ * state. There may be conditions (e.g. out-of-band or sideband
+ * configuration changes for the link) outside the scope of the HWRM
+ * implementation that may clear forced down link state.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', the link shall be forced to the
+ * force_link_speed value. When this bit is set to '1', the HWRM client
+ * should not enable any of the auto negotiation related fields
+ * represented by auto_XXX fields in this command. When this bit is set
+ * to '1' and the HWRM client has enabled an auto_XXX field in this
+ * command, then the HWRM shall ignore the enabled auto_XXX field. When
+ * this bit is set to zero, the link shall be allowed to autoneg.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', the auto-negotiation process shall be
+ * restarted on the link.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', Energy Efficient Ethernet (EEE) is
+ * requested to be enabled on this link. If EEE is not supported on this
+ * port, then this flag shall be ignored by the HWRM.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', Energy Efficient Ethernet (EEE) is
+ * requested to be disabled on this link. If EEE is not supported on
+ * this port, then this flag shall be ignored by the HWRM.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE UINT32_C(0x20)
+ /*
+ * When this bit is set to '1' and EEE is enabled on this link, then TX
+ * LPI is requested to be enabled on the link. If EEE is not supported
+ * on this port, then this flag shall be ignored by the HWRM. If EEE is
+ * disabled on this port, then this flag shall be ignored by the HWRM.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI UINT32_C(0x40)
+ uint32_t flags;
+
+ /* This bit must be '1' for the auto_mode field to be configured. */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
+ /* This bit must be '1' for the auto_duplex field to be configured. */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2)
+ /* This bit must be '1' for the auto_pause field to be configured. */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the auto_link_speed field to be configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the auto_link_speed_mask field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
+ UINT32_C(0x10)
+ /* This bit must be '1' for the wirespeed field to be configured. */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED UINT32_C(0x20)
+ /* This bit must be '1' for the lpbk field to be configured. */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40)
+ /* This bit must be '1' for the preemphasis field to be configured. */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS UINT32_C(0x80)
+ /* This bit must be '1' for the force_pause field to be configured. */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the eee_link_speed_mask field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
+ UINT32_C(0x200)
+ /* This bit must be '1' for the tx_lpi_timer field to be configured. */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400)
+ uint32_t enables;
+
+ /* Port ID of port that is to be configured. */
+ uint16_t port_id;
+
+ /*
+ * This is the speed that will be used if the force bit is '1'. If an
+ * unsupported speed is selected, an error will be generated.
+ */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB \
+ (UINT32_C(0x1) << 0)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB \
+ (UINT32_C(0xa) << 0)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB \
+ (UINT32_C(0x14) << 0)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB \
+ (UINT32_C(0x19) << 0)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB \
+ (UINT32_C(0x64) << 0)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB \
+ (UINT32_C(0xc8) << 0)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB \
+ (UINT32_C(0xfa) << 0)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB \
+ (UINT32_C(0x190) << 0)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB \
+ (UINT32_C(0x1f4) << 0)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB \
+ (UINT32_C(0x3e8) << 0)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB \
+ (UINT32_C(0xffff) << 0)
+ uint16_t force_link_speed;
+
+ /*
+ * This value is used to identify what autoneg mode is used when the
+ * link speed is not being forced.
+ */
+ /*
+ * Disable autoneg. No speeds are selected.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE (UINT32_C(0x0) << 0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Select only the auto_link_speed speed for autoneg mode. This
+ * mode has been DEPRECATED. An HWRM client should not use this
+ * mode.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED \
+ (UINT32_C(0x2) << 0)
+ /*
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should
+ * not use this mode.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW \
+ (UINT32_C(0x3) << 0)
+ /*
+ * Select the speeds based on the corresponding link speed mask
+ * value that is provided.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK \
+ (UINT32_C(0x4) << 0)
+ uint8_t auto_mode;
+
+ /*
+ * This is the duplex setting that will be used if the autoneg_mode is
+ * "one_speed" or "one_or_below".
+ */
+ /* Half Duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF \
+ (UINT32_C(0x0) << 0)
+ /* Full duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL \
+ (UINT32_C(0x1) << 0)
+ /* Both Half and Full duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH \
+ (UINT32_C(0x2) << 0)
+ uint8_t auto_duplex;
+
+ /*
+ * This value is used to configure the pause that will be used for
+ * autonegotiation.
+ */
+ /*
+ * When this bit is '1', Generation of tx pause messages has been
+ * requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages has been
+ * requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ /*
+ * When set to 1, the advertisement of pause is enabled. # When the
+ * auto_mode is not set to none and this flag is set to 1, then the
+ * auto_pause bits on this port are being advertised and autoneg pause
+ * results are being interpreted. # When the auto_mode is not set to
+ * none and this flag is set to 0, the pause is forced as indicated in
+ * force_pause, and also advertised as auto_pause bits, but the autoneg
+ * results are not interpreted since the pause configuration is being
+ * forced. # When the auto_mode is set to none and this flag is set to
+ * 1, auto_pause bits should be ignored and should be set to 0.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
+ uint8_t auto_pause;
+
+ uint8_t unused_0;
+
+ /*
+ * This is the speed that will be used if the autoneg_mode is
+ * "one_speed" or "one_or_below". If an unsupported speed is selected,
+ * an error will be generated.
+ */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB \
+ (UINT32_C(0x1) << 0)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB \
+ (UINT32_C(0xa) << 0)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB \
+ (UINT32_C(0x14) << 0)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB \
+ (UINT32_C(0x19) << 0)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB \
+ (UINT32_C(0x64) << 0)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB \
+ (UINT32_C(0xc8) << 0)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB \
+ (UINT32_C(0xfa) << 0)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB \
+ (UINT32_C(0x190) << 0)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB \
+ (UINT32_C(0x1f4) << 0)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB \
+ (UINT32_C(0x3e8) << 0)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB \
+ (UINT32_C(0xffff) << 0)
+ uint16_t auto_link_speed;
+
+ /*
+ * This is a mask of link speeds that will be used if autoneg_mode is
+ * "mask". If an unsupported speed is enabled, an error will be
+ * generated.
+ */
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
+ UINT32_C(0x2000)
+ uint16_t auto_link_speed_mask;
+
+ /* This value controls the wirespeed feature. */
+ /* Wirespeed feature is disabled. */
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF (UINT32_C(0x0) << 0)
+ /* Wirespeed feature is enabled. */
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON (UINT32_C(0x1) << 0)
+ uint8_t wirespeed;
+
+ /* This value controls the loopback setting for the PHY. */
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE (UINT32_C(0x0) << 0)
+ /*
+ * The HW will be configured with local loopback such that host
+ * data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL (UINT32_C(0x1) << 0)
+ /*
+ * The HW will be configured with remote loopback such that port
+ * logic will send packets back out the transmitter that are
+ * received.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE (UINT32_C(0x2) << 0)
+ uint8_t lpbk;
+
+ /*
+ * This value is used to configure the pause that will be used for force
+ * mode.
+ */
+ /*
+ * When this bit is '1', Generation of tx pause messages is supported.
+ * Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages is supported.
+ * Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+ uint8_t force_pause;
+
+ uint8_t unused_1;
+
+ /*
+ * This value controls the pre-emphasis to be used for the link. The
+ * driver should not set this value (use enables.preemphasis = 0)
+ * unless it is sure of the setting. Normally the HWRM FW will
+ * determine the proper pre-emphasis.
+ */
+ uint32_t preemphasis;
+
+ /*
+ * Setting for link speed mask that is used to advertise speeds during
+ * autonegotiation when EEE is enabled. This field is valid only when
+ * EEE is enabled. The speeds specified in this field shall be a subset
+ * of speeds specified in auto_link_speed_mask. If EEE is enabled, then
+ * at least one speed shall be provided in this mask.
+ */
+ /* Reserved */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ uint16_t eee_link_speed_mask;
+
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * Requested setting of TX LPI timer in microseconds. This field is
+ * valid only when EEE is enabled and TX LPI is enabled.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
+ uint32_t tx_lpi_timer;
+
+ uint32_t unused_4;
+} __attribute__((packed));
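+/*
+ * A minimal sketch of a forced-speed configuration built on the struct
+ * above. Note the FORCE_LINK_SPEED_* encodings are the speed in units of
+ * 100 Mbps (0x1 = 100Mb ... 0x3e8 = 100Gb), with 0xffff special-cased for
+ * 10Mb. The common header fields (req_type, resp_addr, ...) are assumed to
+ * be filled by the caller; rte_cpu_to_le_*() come from rte_byteorder.h.
+ */
+static inline void
+hwrm_port_phy_cfg_force_40g(struct hwrm_port_phy_cfg_input *req,
+   uint16_t port_id)
+{
+ /* FORCE: the auto_XXX fields must not be enabled alongside this flag. */
+ req->flags = rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
+   HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
+ req->enables = rte_cpu_to_le_32(0);
+ req->port_id = rte_cpu_to_le_16(port_id);
+ req->force_link_speed =
+  rte_cpu_to_le_16(HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB);
+}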
+
+/* Output (16 bytes) */
+struct hwrm_port_phy_cfg_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver is to verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_port_phy_qcfg */
+/* Description: This command queries the PHY configuration for the port. */
+/* Input (24 bytes) */
+
+struct hwrm_port_phy_qcfg_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function IDs;
+ * 0xFFF8 - 0xFFFE: reserved for internal processors; 0xFFFF: HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* Port ID of port that is to be queried. */
+ uint16_t port_id;
+
+ uint16_t unused_0[3];
+} __attribute__((packed));
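+/*
+ * A minimal sketch of preparing the query above: only the common header
+ * and port_id need to be filled. HWRM_PORT_PHY_QCFG is assumed to be the
+ * req_type constant defined earlier in this header; memset() comes from
+ * <string.h>.
+ */
+static inline void
+hwrm_port_phy_qcfg_prep(struct hwrm_port_phy_qcfg_input *req,
+   uint16_t port_id, uint64_t resp_dma)
+{
+ memset(req, 0, sizeof(*req));
+ req->req_type = rte_cpu_to_le_16(HWRM_PORT_PHY_QCFG);
+ /* No completion-ring completion for this request. */
+ req->cmpl_ring = rte_cpu_to_le_16((uint16_t)-1);
+ /* 16B-aligned, zeroed response area, per the resp_addr rules above. */
+ req->resp_addr = rte_cpu_to_le_64(resp_dma);
+ req->port_id = rte_cpu_to_le_16(port_id);
+}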
+
+/* Output (96 bytes) */
+struct hwrm_port_phy_qcfg_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver is to verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /* This value indicates the current link status. */
+ /* There is no link or cable detected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK (UINT32_C(0x0) << 0)
+ /* There is no link, but a cable has been detected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL (UINT32_C(0x1) << 0)
+ /* There is a link. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK (UINT32_C(0x2) << 0)
+ uint8_t link;
+
+ uint8_t unused_0;
+
+ /* This value indicates the current link speed of the connection. */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB \
+ (UINT32_C(0x1) << 0)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB \
+ (UINT32_C(0xa) << 0)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB \
+ (UINT32_C(0x14) << 0)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB \
+ (UINT32_C(0x19) << 0)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB \
+ (UINT32_C(0x64) << 0)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB \
+ (UINT32_C(0xc8) << 0)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB \
+ (UINT32_C(0xfa) << 0)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB \
+ (UINT32_C(0x190) << 0)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB \
+ (UINT32_C(0x1f4) << 0)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB \
+ (UINT32_C(0x3e8) << 0)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB \
+ (UINT32_C(0xffff) << 0)
+ uint16_t link_speed;
+
+ /* This value indicates the duplex of the current connection. */
+ /* Half Duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_HALF (UINT32_C(0x0) << 0)
+ /* Full duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_FULL (UINT32_C(0x1) << 0)
+ uint8_t duplex;
+
+ /*
+ * This value is used to indicate the current pause configuration. When
+ * autoneg is enabled, this value represents the autoneg results of
+ * pause configuration.
+ */
+ /*
+ * When this bit is '1', Generation of tx pause messages is supported.
+ * Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages is supported.
+ * Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
+ uint8_t pause;
+
+ /*
+ * The supported speeds for the port. This is a bit mask. For each speed
+ * that is supported, the corresponding bit will be set to '1'.
+ */
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \
+ UINT32_C(0x2000)
+ uint16_t support_speeds;
+
+ /*
+ * Current setting of forced link speed. When the link speed is not
+ * being forced, this value shall be set to 0.
+ */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB \
+ (UINT32_C(0x1) << 0)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB \
+ (UINT32_C(0xa) << 0)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB \
+ (UINT32_C(0x14) << 0)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB \
+ (UINT32_C(0x19) << 0)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB \
+ (UINT32_C(0x64) << 0)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB \
+ (UINT32_C(0xc8) << 0)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB \
+ (UINT32_C(0xfa) << 0)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \
+ (UINT32_C(0x190) << 0)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \
+ (UINT32_C(0x1f4) << 0)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
+ (UINT32_C(0x3e8) << 0)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \
+ (UINT32_C(0xffff) << 0)
+ uint16_t force_link_speed;
+
+ /* Current setting of auto negotiation mode. */
+ /*
+ * Autoneg is disabled. No speeds are selected.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE \
+ (UINT32_C(0x0) << 0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Select only the auto_link_speed speed for autoneg mode. This
+ * mode has been DEPRECATED. An HWRM client should not use this
+ * mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED \
+ (UINT32_C(0x2) << 0)
+ /*
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should
+ * not use this mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW \
+ (UINT32_C(0x3) << 0)
+ /*
+ * Select the speeds based on the corresponding link speed mask
+ * value that is provided.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK \
+ (UINT32_C(0x4) << 0)
+ uint8_t auto_mode;
+
+ /*
+ * Current setting of pause autonegotiation.
+ */
+ /*
+ * When this bit is '1', Generation of tx pause messages has been
+ * requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages has been
+ * requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ /*
+ * When set to 1, the advertisement of pause is enabled. # When the
+ * auto_mode is not set to none and this flag is set to 1, then the
+ * auto_pause bits on this port are being advertised and autoneg pause
+ * results are being interpreted. # When the auto_mode is not set to
+ * none and this flag is set to 0, the pause is forced as indicated in
+ * force_pause, and also advertised as auto_pause bits, but the autoneg
+ * results are not interpreted since the pause configuration is being
+ * forced. # When the auto_mode is set to none and this flag is set to
+ * 1, auto_pause bits should be ignored and should be set to 0.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+ UINT32_C(0x4)
+ uint8_t auto_pause;
+
+ /*
+ * Current setting for auto_link_speed. This field is only valid when
+ * auto_mode is set to "one_speed" or "one_or_below".
+ */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB \
+ (UINT32_C(0x1) << 0)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB \
+ (UINT32_C(0xa) << 0)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB \
+ (UINT32_C(0x14) << 0)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB \
+ (UINT32_C(0x19) << 0)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB \
+ (UINT32_C(0x64) << 0)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB \
+ (UINT32_C(0xc8) << 0)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB \
+ (UINT32_C(0xfa) << 0)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB \
+ (UINT32_C(0x190) << 0)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB \
+ (UINT32_C(0x1f4) << 0)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB \
+ (UINT32_C(0x3e8) << 0)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \
+ (UINT32_C(0xffff) << 0)
+ uint16_t auto_link_speed;
+
+ /*
+ * Current setting for auto_link_speed_mask that is used to advertise
+ * speeds during autonegotiation. This field is only valid when
+ * auto_mode is set to "mask". The speeds specified in this field shall
+ * be a subset of supported speeds on this port.
+ */
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
+ UINT32_C(0x2000)
+ uint16_t auto_link_speed_mask;
+
+ /* Current setting for wirespeed. */
+ /* Wirespeed feature is disabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF (UINT32_C(0x0) << 0)
+ /* Wirespeed feature is enabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON (UINT32_C(0x1) << 0)
+ uint8_t wirespeed;
+
+ /* Current setting for loopback. */
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE (UINT32_C(0x0) << 0)
+ /*
+ * The HW will be configured with local loopback such that host
+ * data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL (UINT32_C(0x1) << 0)
+ /*
+ * The HW will be configured with remote loopback such that port
+ * logic will send packets back out the transmitter that are
+ * received.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE (UINT32_C(0x2) << 0)
+ uint8_t lpbk;
+
+ /*
+ * Current setting of forced pause. When the pause configuration is not
+ * being forced, then this value shall be set to 0.
+ */
+ /*
+ * When this bit is '1', Generation of tx pause messages is supported.
+ * Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages is supported.
+ * Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX \
+ UINT32_C(0x2)
+ uint8_t force_pause;
+
+ /*
+ * This value indicates the current status of the optics module on this
+ * port.
+ */
+ /* Module is inserted and accepted */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \
+ (UINT32_C(0x0) << 0)
+ /* Module is rejected and the transmit-side laser is disabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \
+ (UINT32_C(0x1) << 0)
+ /* Module mismatch warning. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \
+ (UINT32_C(0x2) << 0)
+ /* Module is rejected and powered down. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \
+ (UINT32_C(0x3) << 0)
+ /* Module is not inserted. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
+ (UINT32_C(0x4) << 0)
+ /* Module status is not applicable. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
+ (UINT32_C(0xff) << 0)
+ uint8_t module_status;
+
+ /* Current setting for preemphasis. */
+ uint32_t preemphasis;
+
+ /* This field represents the major version of the PHY. */
+ uint8_t phy_maj;
+
+ /* This field represents the minor version of the PHY. */
+ uint8_t phy_min;
+
+ /* This field represents the build version of the PHY. */
+ uint8_t phy_bld;
+
+ /* This value represents a PHY type. */
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \
+ (UINT32_C(0x0) << 0)
+ /* BASE-CR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \
+ (UINT32_C(0x1) << 0)
+ /* BASE-KR4 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \
+ (UINT32_C(0x2) << 0)
+ /* BASE-LR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \
+ (UINT32_C(0x3) << 0)
+ /* BASE-SR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \
+ (UINT32_C(0x4) << 0)
+ /* BASE-KR2 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \
+ (UINT32_C(0x5) << 0)
+ /* BASE-KX */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \
+ (UINT32_C(0x6) << 0)
+ /* BASE-KR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \
+ (UINT32_C(0x7) << 0)
+ /* BASE-T */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \
+ (UINT32_C(0x8) << 0)
+ /* EEE capable BASE-T */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \
+ (UINT32_C(0x9) << 0)
+ /* SGMII connected external PHY */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \
+ (UINT32_C(0xa) << 0)
+ uint8_t phy_type;
+
+ /* This value represents a media type. */
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN \
+ (UINT32_C(0x0) << 0)
+ /* Twisted Pair */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP (UINT32_C(0x1) << 0)
+ /* Direct Attached Copper */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC \
+ (UINT32_C(0x2) << 0)
+ /* Fiber */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE \
+ (UINT32_C(0x3) << 0)
+ uint8_t media_type;
+
+ /* This value represents a transceiver type. */
+ /* PHY and MAC are in the same package */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
+ (UINT32_C(0x1) << 0)
+ /* PHY and MAC are in different packages */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
+ (UINT32_C(0x2) << 0)
+ uint8_t xcvr_pkg_type;
+
+ /*
+ * This field represents flags related to EEE configuration. These EEE
+ * configuration flags are valid only when the auto_mode is not set to
+ * none (in other words autonegotiation is enabled).
+ */
+ /* This field represents PHY address. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
+ /*
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled.
+ * Speeds for autoneg with EEE mode enabled are based on
+ * eee_link_speed_mask.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
+ UINT32_C(0x20)
+ /*
+ * This flag is valid only when eee_enabled is set to 1. # If
+ * eee_enabled is set to 0, then EEE mode is disabled and this flag
+ * shall be ignored. # If eee_enabled is set to 1 and this flag is set
+ * to 1, then Energy Efficient Ethernet (EEE) mode is enabled and in
+ * use. # If eee_enabled is set to 1 and this flag is set to 0, then
+ * Energy Efficient Ethernet (EEE) mode is enabled but is currently not
+ * in use.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \
+ UINT32_C(0x40)
+ /*
+ * This flag is valid only when eee_enabled is set to 1. # If
+ * eee_enabled is set to 0, then EEE mode is disabled and this flag
+ * shall be ignored. # If eee_enabled is set to 1 and this flag is set
+ * to 1, then Energy Efficient Ethernet (EEE) mode is enabled and TX LPI
+ * is enabled. # If eee_enabled is set to 1 and this flag is set to 0,
+ * then Energy Efficient Ethernet (EEE) mode is enabled but TX LPI is
+ * disabled.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \
+ UINT32_C(0x80)
+ /*
+ * This field represents flags related to EEE configuration. These EEE
+ * configuration flags are valid only when the auto_mode is not set to
+ * none (in other words autonegotiation is enabled).
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
+ uint8_t eee_config_phy_addr;
+
+ /*
+ * When set to 1, parallel detection is used to determine the speed of
+ * the link partner. Parallel detection is used when an
+ * autonegotiation-capable device is connected to a link partner that
+ * is not capable of autonegotiation.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT \
+ UINT32_C(0x1)
+ /* Reserved field, set to 0 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_MASK UINT32_C(0xfe)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_SFT 1
+ uint8_t parallel_detect;
+
+ /*
+ * The speeds advertised for the port by the link partner. For each
+ * speed that is advertised, the corresponding bit will be set to '1'.
+ */
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
+ UINT32_C(0x2000)
+ uint16_t link_partner_adv_speeds;
+
+ /*
+ * The autoneg mode advertised for the port by the link partner. This
+ * field is deprecated and should be set to 0.
+ */
+ /*
+ * Autoneg is disabled. No speeds are selected.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \
+ (UINT32_C(0x0) << 0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS\
+ (UINT32_C(0x1) << 0)
+ /*
+ * Select only the auto_link_speed speed for autoneg mode. This
+ * mode has been DEPRECATED. An HWRM client should not use this
+ * mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
+ (UINT32_C(0x2) << 0)
+ /*
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should
+ * not use this mode.
+ */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
+ (UINT32_C(0x3) << 0)
+ /*
+ * Select the speeds based on the corresponding link speed mask
+ * value that is provided.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK\
+ (UINT32_C(0x4) << 0)
+ uint8_t link_partner_adv_auto_mode;
+
+ /* The advertised pause settings on the port by the link partner. */
+ /*
+ * When this bit is '1', Generation of tx pause messages is supported.
+ * Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages is supported.
+ * Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \
+ UINT32_C(0x2)
+ uint8_t link_partner_adv_pause;
+
+ /*
+ * Current setting for link speed mask that is used to advertise speeds
+ * during autonegotiation when EEE is enabled. This field is valid only
+ * when the eee_enabled flag is set to 1. The speeds specified in this
+ * field shall be a subset of speeds specified in auto_link_speed_mask.
+ */
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ uint16_t adv_eee_link_speed_mask;
+
+ /*
+ * Current setting for link speed mask that is advertised by the link
+ * partner when EEE is enabled. This field is valid only when the
+ * eee_enabled flag is set to 1.
+ */
+ /* Reserved */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ uint16_t link_partner_adv_eee_link_speed_mask;
+
+ /*
+ * Current setting of TX LPI timer in microseconds. This field is valid
+ * only when the eee_enabled flag is set to 1 and tx_lpi_enabled is set
+ * to 1.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
+ /* This value represents transceiver identifier type. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT \
+ 24
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
+ (UINT32_C(0x0) << 24)
+ /* SFP/SFP+/SFP28 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
+ (UINT32_C(0x3) << 24)
+ /* QSFP */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
+ (UINT32_C(0xc) << 24)
+ /* QSFP+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \
+ (UINT32_C(0xd) << 24)
+ /* QSFP28 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
+ (UINT32_C(0x11) << 24)
+ uint32_t xcvr_identifier_type_tx_lpi_timer;
+
+ uint32_t unused_1;
+
+ /*
+ * Up to 16 bytes of null padded ASCII string representing PHY vendor.
+ * If the string is set to null, then the vendor name is not available.
+ */
+ char phy_vendor_name[16];
+
+ /*
+ * Up to 16 bytes of null padded ASCII string that identifies vendor
+ * specific part number of the PHY. If the string is set to null, then
+ * the vendor specific part number is not available.
+ */
+ char phy_vendor_partnumber[16];
+
+ uint32_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+ uint8_t unused_5;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
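+/*
+ * A minimal sketch of decoding the response above: unpack the combined
+ * xcvr_identifier_type_tx_lpi_timer field with its MASK/SFT pairs, and
+ * convert link_speed (units of 100 Mbps, with 0xffff meaning 10Mb) back
+ * to Mbps. rte_le_to_cpu_*() come from rte_byteorder.h.
+ */
+static inline uint32_t
+hwrm_port_phy_qcfg_tx_lpi_timer(const struct hwrm_port_phy_qcfg_output *resp)
+{
+ uint32_t v = rte_le_to_cpu_32(resp->xcvr_identifier_type_tx_lpi_timer);
+
+ /* Low 24 bits hold the TX LPI timer; the top byte is the xcvr type. */
+ return (v & HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK) >>
+  HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT;
+}
+
+static inline uint32_t
+hwrm_port_phy_qcfg_speed_mbps(const struct hwrm_port_phy_qcfg_output *resp)
+{
+ uint16_t speed = rte_le_to_cpu_16(resp->link_speed);
+
+ if (speed == 0xffff) /* special encoding for 10Mb */
+  return 10;
+ return (uint32_t)speed * 100;
+}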
+
+/* hwrm_ver_get */
+/*
+ * Description: This function is called by a driver to determine the HWRM
+ * interface version supported by the HWRM firmware, the version of the HWRM
+ * firmware implementation, the name of the HWRM firmware, and the versions
+ * and names of other embedded firmwares. Any interface or firmware version
+ * with major = 0, minor = 0, and update = 0 shall be considered an invalid
+ * version.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8: used for function IDs;
+ * 0xFFF8 - 0xFFFE: reserved for internal processors; 0xFFFF: HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the driver HWRM implementation. The
+ * interface major version is intended to change only when non backward
+ * compatible changes are made to the HWRM interface specification.
+ */
+ uint8_t hwrm_intf_maj;
+
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the driver HWRM implementation. A change
+ * in interface minor version is used to reflect significant backward
+ * compatible modification to HWRM interface specification. This can be
+ * due to addition or removal of functionality. HWRM interface
+ * specifications with the same major version but different minor
+ * versions are compatible.
+ */
+ uint8_t hwrm_intf_min;
+
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the driver HWRM implementation. The
+ * interface update version is used to reflect minor changes or bug
+ * fixes to a released HWRM interface specification.
+ */
+ uint8_t hwrm_intf_upd;
+
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver is to verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the HWRM implementation. The interface
+ * major version is intended to change only when non backward compatible
+ * changes are made to the HWRM interface specification. A HWRM
+ * implementation that is compliant with this specification shall
+ * provide value of 1 in this field.
+ */
+ uint8_t hwrm_intf_maj;
+
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the HWRM implementation. A change in
+ * interface minor version is used to reflect significant backward
+ * compatible modification to HWRM interface specification. This can be
+ * due to addition or removal of functionality. HWRM interface
+ * specifications with the same major version but different minor
+ * versions are compatible. A HWRM implementation that is compliant with
+ * this specification shall provide value of 0 in this field.
+ */
+ uint8_t hwrm_intf_min;
+
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the HWRM implementation. The interface
+ * update version is used to reflect minor changes or bug fixes to a
+ * released HWRM interface specification. A HWRM implementation that is
+ * compliant with this specification shall provide value of 1 in this
+ * field.
+ */
+ uint8_t hwrm_intf_upd;
+
+ uint8_t hwrm_intf_rsvd;
+
+ /*
+ * This field represents the major version of HWRM firmware. A change in
+ * firmware major version represents a major firmware release.
+ */
+ uint8_t hwrm_fw_maj;
+
+ /*
+ * This field represents the minor version of HWRM firmware. A change in
+ * firmware minor version represents significant firmware functionality
+ * changes.
+ */
+ uint8_t hwrm_fw_min;
+
+ /*
+ * This field represents the build version of HWRM firmware. A change in
+ * firmware build version represents bug fixes to a released firmware.
+ */
+ uint8_t hwrm_fw_bld;
+
+ /*
+ * This field is a reserved field. This field can be used to represent
+ * firmware branches or customer specific releases tied to a specific
+ * (major,minor,update) version of the HWRM firmware.
+ */
+ uint8_t hwrm_fw_rsvd;
+
+ /*
+ * This field represents the major version of mgmt firmware. A change in
+ * major version represents a major release.
+ */
+ uint8_t mgmt_fw_maj;
+
+ /*
+ * This field represents the minor version of mgmt firmware. A change in
+ * minor version represents significant functionality changes.
+ */
+ uint8_t mgmt_fw_min;
+
+ /*
+ * This field represents the build version of mgmt firmware. A change in
+ * update version represents bug fixes.
+ */
+ uint8_t mgmt_fw_bld;
+
+ /*
+ * This field is a reserved field. This field can be used to represent
+ * firmware branches or customer specific releases tied to a specific
+ * (major,minor,update) version
+ */
+ uint8_t mgmt_fw_rsvd;
+
+ /*
+ * This field represents the major version of network control firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t netctrl_fw_maj;
+
+ /*
+ * This field represents the minor version of network control firmware.
+ * A change in minor version represents significant functionality
+ * changes.
+ */
+ uint8_t netctrl_fw_min;
+
+ /*
+ * This field represents the build version of network control firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t netctrl_fw_bld;
+
+ /*
+ * This field is a reserved field. This field can be used to represent
+ * firmware branches or customer specific releases tied to a specific
+ * (major,minor,update) version
+ */
+ uint8_t netctrl_fw_rsvd;
+
+ /*
+ * This field is reserved for future use. The responder should set it to
+ * 0. The requester should ignore this field.
+ */
+ uint32_t reserved1;
+
+ /*
+ * This field represents the major version of RoCE firmware. A change in
+ * major version represents a major release.
+ */
+ uint8_t roce_fw_maj;
+
+ /*
+ * This field represents the minor version of RoCE firmware. A change in
+ * minor version represents significant functionality changes.
+ */
+ uint8_t roce_fw_min;
+
+ /*
+ * This field represents the build version of RoCE firmware. A change in
+ * update version represents bug fixes.
+ */
+ uint8_t roce_fw_bld;
+
+ /*
+ * This field is a reserved field. This field can be used to represent
+ * firmware branches or customer specific releases tied to a specific
+ * (major,minor,update) version
+ */
+ uint8_t roce_fw_rsvd;
+
+ /*
+ * This field represents the name of HWRM FW (ASCII chars without NULL
+ * at the end).
+ */
+ char hwrm_fw_name[16];
+
+ /*
+ * This field represents the name of mgmt FW (ASCII chars without NULL
+ * at the end).
+ */
+ char mgmt_fw_name[16];
+
+ /*
+ * This field represents the name of network control firmware (ASCII
+ * chars without NULL at the end).
+ */
+ char netctrl_fw_name[16];
+
+ /*
+ * This field is reserved for future use. The responder should set it to
+ * 0. The requester should ignore this field.
+ */
+ uint32_t reserved2[4];
+
+ /*
+ * This field represents the name of RoCE FW (ASCII chars without NULL
+ * at the end).
+ */
+ char roce_fw_name[16];
+
+ /* This field returns the chip number. */
+ uint16_t chip_num;
+
+ /* This field returns the revision of chip. */
+ uint8_t chip_rev;
+
+ /* This field returns the chip metal number. */
+ uint8_t chip_metal;
+
+ /* This field returns the bond id of the chip. */
+ uint8_t chip_bond_id;
+
+ /*
+ * This value indicates the type of platform used for chip
+ * implementation.
+ */
+ /* ASIC */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC \
+ (UINT32_C(0x0) << 0)
+ /* FPGA platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA \
+ (UINT32_C(0x1) << 0)
+ /* Palladium platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM \
+ (UINT32_C(0x2) << 0)
+ uint8_t chip_platform_type;
+
+ /*
+ * This field returns the maximum value of request window that is
+ * supported by the HWRM. The request window is mapped into device
+ * address space using MMIO.
+ */
+ uint16_t max_req_win_len;
+
+ /*
+ * This field returns the maximum size of the response buffer in bytes.
+ * If a request specifies a response buffer length that is greater than
+ * this value, then the HWRM should fail it. The value of this field
+ * shall be 4KB or more.
+ */
+ uint16_t max_resp_len;
+
+ /*
+ * This field returns the default request timeout value in milliseconds.
+ */
+ uint16_t def_req_timeout;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
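+/*
+ * A minimal sketch of applying the rule in the description above: an
+ * interface or firmware version of 0.0.0 is invalid, so a driver rejects
+ * it before comparing against the interface version it was built for.
+ */
+static inline int
+hwrm_ver_get_intf_valid(const struct hwrm_ver_get_output *resp)
+{
+ /* major = 0, minor = 0, update = 0 is an invalid version. */
+ return resp->hwrm_intf_maj != 0 || resp->hwrm_intf_min != 0 ||
+  resp->hwrm_intf_upd != 0;
+}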
+
+/* hwrm_queue_qportcfg */
+/*
+ * Description: This function is called by a driver to query the queue
+ * configuration of a port. # The HWRM shall advertise at least one queue
+ * with the lossy service profile. # The driver shall use this command to
+ * query queue ids before configuring or using any queues. # If a service
+ * profile is not set for a queue, then the driver shall not use that queue
+ * without configuring a service profile for it. # If the driver is not
+ * allowed to configure service profiles, then the driver shall only use
+ * queues for which service profiles are pre-configured.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_queue_qportcfg_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * Enumeration denoting the RX, TX type of the resource. This
+ * enumeration is used for resources that are similar for both TX and RX
+ * paths of the chip.
+ */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX \
+ (UINT32_C(0x0) << 0)
+ /* rx path */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX \
+ (UINT32_C(0x1) << 0)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
+ uint32_t flags;
+
+ /*
+ * Port ID of port for which the queue configuration is being queried.
+ * This field is only required when sent by IPC.
+ */
+ uint16_t port_id;
+
+ uint16_t unused_0;
+} __attribute__((packed));
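+
+/*
+ * Illustrative sketch, not part of the original patch: preparing a
+ * hwrm_queue_qportcfg request for the TX path. HWRM fields are
+ * little-endian, hence the rte_cpu_to_le_*() conversions; assumes
+ * <string.h> and <rte_byteorder.h> are included and that the
+ * HWRM_QUEUE_QPORTCFG request-type macro is defined earlier in this
+ * header.
+ */
+static inline void
+bnxt_example_prep_qportcfg(struct hwrm_queue_qportcfg_input *req,
+			   uint16_t port_id)
+{
+	memset(req, 0, sizeof(*req));
+	req->req_type = rte_cpu_to_le_16(HWRM_QUEUE_QPORTCFG);
+	req->cmpl_ring = rte_cpu_to_le_16((uint16_t)-1); /* no CR completion */
+	/* Query the TX path; the RX path would use ..._FLAGS_PATH_RX. */
+	req->flags = rte_cpu_to_le_32(HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
+	req->port_id = rte_cpu_to_le_16(port_id);
+}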
+
+/* hwrm_ring_alloc */
+/*
+ * Description: This command allocates and does basic preparation for a ring.
+ */
+
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* This bit must be '1' for the Reserved1 field to be configured. */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1)
+ /* This bit must be '1' for the Reserved2 field to be configured. */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED2 UINT32_C(0x2)
+ /* This bit must be '1' for the Reserved3 field to be configured. */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the stat_ctx_id_valid field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID UINT32_C(0x8)
+ /* This bit must be '1' for the Reserved4 field to be configured. */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10)
+ /* This bit must be '1' for the max_bw_valid field to be configured. */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20)
+ uint32_t enables;
+
+ /* Ring Type. */
+ /* Completion Ring (CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL (UINT32_C(0x0) << 0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX (UINT32_C(0x1) << 0)
+ /* RX Ring (RR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX (UINT32_C(0x2) << 0)
+ uint8_t ring_type;
+
+ uint8_t unused_0;
+ uint16_t unused_1;
+
+ /* This value is a pointer to the page table for the Ring. */
+ uint64_t page_tbl_addr;
+
+ /* First Byte Offset of the first entry in the first page. */
+ uint32_t fbo;
+
+ /*
+ * Actual page size is 2^page_size. The supported range is powers of 2
+ * from 16 B to 1 GB: 4 = 16 B, 12 = 4 KB, 13 = 8 KB, 16 = 64 KB,
+ * 22 = 2 MB, 23 = 4 MB, 31 = 1 GB.
+ */
+ uint8_t page_size;
+
+ /*
+ * This value indicates the depth of the page table. For this version of
+ * the specification, any value other than 0 or 1 shall be considered
+ * invalid. page_tbl_depth = 0 is treated as a special case: 1. the FBO
+ * and page size fields are not valid; 2. page_tbl_addr is the physical
+ * address of the first element of the ring.
+ */
+ uint8_t page_tbl_depth;
+
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * Number of 16B units in the ring. Minimum size for a ring is 16 16B
+ * entries.
+ */
+ uint32_t length;
+
+ /*
+ * Logical ring number for the ring to be allocated. This value
+ * determines the position in the doorbell area where the update to the
+ * ring will be made. For completion rings, this value is also the MSI-X
+ * vector number for the function the completion ring is associated
+ * with.
+ */
+ uint16_t logical_id;
+
+ /*
+ * This field is used only when ring_type is a TX ring. This value
+ * indicates what completion ring the TX ring is associated with.
+ */
+ uint16_t cmpl_ring_id;
+
+ /*
+ * This field is used only when ring_type is a TX ring. This value
+ * indicates what CoS queue the TX ring is associated with.
+ */
+ uint16_t queue_id;
+
+ uint8_t unused_4;
+ uint8_t unused_5;
+
+ /* This field is reserved for future use. It shall be set to 0. */
+ uint32_t reserved1;
+ /* This field is reserved for future use. It shall be set to 0. */
+ uint16_t reserved2;
+
+ uint8_t unused_6;
+ uint8_t unused_7;
+ /* This field is reserved for future use. It shall be set to 0. */
+ uint32_t reserved3;
+
+ /*
+ * This field is used only when ring_type is a TX ring. This input
+ * indicates what statistics context this ring should be associated
+ * with.
+ */
+ uint32_t stat_ctx_id;
+
+ /* This field is reserved for future use. It shall be set to 0. */
+ uint32_t reserved4;
+
+ /*
+ * This field is used only when ring_type is a TX ring. Maximum BW
+ * allocated to this TX ring in Mbps. The HWRM will translate this value
+ * into a byte counter and time interval used for this ring inside the
+ * device.
+ */
+ uint32_t max_bw;
+
+ /*
+ * This field is used only when ring_type is a Completion ring. This
+ * value indicates what interrupt mode should be used on this completion
+ * ring. Note: In the legacy interrupt mode, no more than 16 completion
+ * rings are allowed.
+ */
+ /* Legacy INTA */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY (UINT32_C(0x0) << 0)
+ /* Reserved */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD (UINT32_C(0x1) << 0)
+ /* MSI-X */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX (UINT32_C(0x2) << 0)
+ /* No Interrupt - Polled mode */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL (UINT32_C(0x3) << 0)
+ uint8_t int_mode;
+
+ uint8_t unused_8[3];
+} __attribute__((packed));
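+
+/*
+ * Illustrative sketch, not part of the original patch (same header
+ * assumptions as the sketch above): allocating a TX ring whose
+ * descriptors live in one physically contiguous area, so
+ * page_tbl_depth = 0 and page_tbl_addr points directly at the ring;
+ * FBO and page_size are then ignored, as documented above. The
+ * HWRM_RING_ALLOC macro is assumed to be defined earlier in this
+ * header.
+ */
+static inline void
+bnxt_example_prep_tx_ring_alloc(struct hwrm_ring_alloc_input *req,
+				uint64_t ring_phys, uint32_t len16b,
+				uint16_t cmpl_ring_id, uint16_t queue_id,
+				uint32_t stat_ctx_id)
+{
+	memset(req, 0, sizeof(*req));
+	req->req_type = rte_cpu_to_le_16(HWRM_RING_ALLOC);
+	/* Tie the ring to a statistics context via the enables bit. */
+	req->enables = rte_cpu_to_le_32(
+		HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+	req->ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
+	req->page_tbl_addr = rte_cpu_to_le_64(ring_phys);
+	req->page_tbl_depth = 0;	/* contiguous ring, no page table */
+	req->length = rte_cpu_to_le_32(len16b);	/* in 16 B units, >= 16 */
+	req->cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
+	req->queue_id = rte_cpu_to_le_16(queue_id);
+	req->stat_ctx_id = rte_cpu_to_le_32(stat_ctx_id);
+}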
+
+/* Output (16 bytes) */
+
+struct hwrm_ring_alloc_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /* Physical number of the ring allocated. */
+ uint16_t ring_id;
+
+ /* Logical number of the ring allocated. */
+ uint16_t logical_ring_id;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_ring_free */
+/*
+ * Description: This command is used to free a ring and associated resources.
+ */
+/* Input (24 bytes) */
+
+struct hwrm_ring_free_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* Ring Type. */
+ /* Completion Ring (CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_CMPL (UINT32_C(0x0) << 0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_TX (UINT32_C(0x1) << 0)
+ /* RX Ring (RR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX (UINT32_C(0x2) << 0)
+ uint8_t ring_type;
+
+ uint8_t unused_0;
+
+ /* Physical number of the ring allocated. */
+ uint16_t ring_id;
+
+ uint32_t unused_1;
+} __attribute__((packed));
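+
+/*
+ * Illustrative sketch, not part of the original patch: freeing a
+ * completion ring by the physical ring_id returned by hwrm_ring_alloc.
+ * The HWRM_RING_FREE macro and byte-order helpers are assumed, as in
+ * the sketches above.
+ */
+static inline void
+bnxt_example_prep_ring_free(struct hwrm_ring_free_input *req,
+			    uint16_t ring_id)
+{
+	memset(req, 0, sizeof(*req));
+	req->req_type = rte_cpu_to_le_16(HWRM_RING_FREE);
+	req->ring_type = HWRM_RING_FREE_INPUT_RING_TYPE_CMPL;
+	req->ring_id = rte_cpu_to_le_16(ring_id);
+}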
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_ring_grp_alloc */
+/*
+ * Description: This API allocates and does basic preparation for a ring group.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* This value identifies the CR associated with the ring group. */
+ uint16_t cr;
+
+ /* This value identifies the main RR associated with the ring group. */
+ uint16_t rr;
+
+ /*
+ * This value identifies the aggregation RR associated with the ring
+ * group. If this value is 0xFF... (All Fs), then no Aggregation ring
+ * will be set.
+ */
+ uint16_t ar;
+
+ /*
+ * This value identifies the statistics context associated with the ring
+ * group.
+ */
+ uint16_t sc;
+} __attribute__((packed));
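+
+/*
+ * Illustrative sketch, not part of the original patch: grouping a
+ * completion ring, an RX ring and a statistics context, with no
+ * aggregation ring (ar = all Fs, as documented above). The
+ * HWRM_RING_GRP_ALLOC macro is assumed to be defined earlier in this
+ * header.
+ */
+static inline void
+bnxt_example_prep_ring_grp(struct hwrm_ring_grp_alloc_input *req,
+			   uint16_t cr_id, uint16_t rr_id, uint16_t stat_id)
+{
+	memset(req, 0, sizeof(*req));
+	req->req_type = rte_cpu_to_le_16(HWRM_RING_GRP_ALLOC);
+	req->cr = rte_cpu_to_le_16(cr_id);
+	req->rr = rte_cpu_to_le_16(rr_id);
+	req->ar = rte_cpu_to_le_16((uint16_t)-1);	/* no aggregation ring */
+	req->sc = rte_cpu_to_le_16(stat_id);
+}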
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /*
+ * This is the ring group ID value. Use this value to program the
+ * default ring group for the VNIC or as table entries in an RSS/COS
+ * context.
+ */
+ uint32_t ring_group_id;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_ring_grp_free */
+/*
+ * Description: This API frees a ring group and associated resources.
+ * # If a ring in the ring group is reset or freed, then the associated
+ *   rings in the ring group shall also be reset/freed using
+ *   hwrm_ring_free.
+ * # A function driver shall always use hwrm_ring_grp_free after
+ *   freeing all rings in a group.
+ * # As a part of executing this command, the HWRM shall reset all
+ *   associated ring group resources.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* This is the ring group ID value. */
+ uint32_t ring_group_id;
+
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_alloc */
+/*
+ * Description: This command allocates and does basic preparation for a stat
+ * context.
+ */
+
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* This is the address for the statistics block. */
+ uint64_t stats_dma_addr;
+
+ /*
+ * The statistic block update period in ms, e.g. 250 ms, 500 ms, 750 ms,
+ * 1000 ms.
+ */
+ uint32_t update_period_ms;
+
+ uint32_t unused_0;
+} __attribute__((packed));
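+
+/*
+ * Illustrative sketch, not part of the original patch: requesting a
+ * statistics context that DMAs counters into a host buffer once per
+ * second. 'stats_iova' would be the IO address of a driver-allocated
+ * statistics block; the HWRM_STAT_CTX_ALLOC macro is assumed to be
+ * defined earlier in this header.
+ */
+static inline void
+bnxt_example_prep_stat_ctx(struct hwrm_stat_ctx_alloc_input *req,
+			   uint64_t stats_iova)
+{
+	memset(req, 0, sizeof(*req));
+	req->req_type = rte_cpu_to_le_16(HWRM_STAT_CTX_ALLOC);
+	req->stats_dma_addr = rte_cpu_to_le_64(stats_iova);
+	req->update_period_ms = rte_cpu_to_le_32(1000);	/* 1 s updates */
+}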
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_alloc_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_clr_stats */
+/* Description: This command clears statistics of a context. */
+
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* ID of the statistics context that is being cleared. */
+ uint32_t stat_ctx_id;
+
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_free */
+/* Description: This command is used to free a stat context. */
+/* Input (24 bytes) */
+
+struct hwrm_stat_ctx_free_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* ID of the statistics context that is being freed. */
+ uint32_t stat_ctx_id;
+
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+
+struct hwrm_stat_ctx_free_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vnic_alloc */
+/*
+ * Description: A VNIC is a resource in the RX side of the chip that is
+ * used to represent a virtual host "interface".
+ * # At the time of VNIC allocation or configuration, the function can
+ *   specify whether it wants the requested VNIC to be the default VNIC
+ *   for the function or not.
+ * # If a function requests allocation of a VNIC for the first time and
+ *   a VNIC is successfully allocated by the HWRM, then the HWRM shall
+ *   make the allocated VNIC the default VNIC for that function.
+ * # The default VNIC shall be used for the default action for a
+ *   partition or function.
+ * # For each VNIC allocated on a function, a mapping on the RX side to
+ *   map the allocated VNIC to the source virtual interface shall be
+ *   performed by the HWRM. This should be hidden from the function
+ *   driver requesting the VNIC allocation. This enables
+ *   broadcast/multicast replication with source knockout.
+ * # If multicast replication with source knockout is enabled, then the
+ *   internal VNIC to SVIF mapping data structures shall be programmed
+ *   at the time of VNIC allocation.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * When this bit is '1', this VNIC is requested to be the default VNIC
+ * for this function.
+ */
+ #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ uint32_t flags;
+
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vnic_cfg */
+/* Description: Configure the RX VNIC structure. */
+
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * When this bit is '1', the VNIC is requested to be the default VNIC
+ * for the function.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is being configured to strip VLAN in
+ * the RX path. If set to '0', then VLAN stripping is disabled on this
+ * VNIC.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is being configured to buffer receive
+ * packets in the hardware until the host posts new receive buffers. If
+ * set to '0', then bd_stall is being configured to be disabled on this
+ * VNIC.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is being configured to receive both
+ * RoCE and non-RoCE traffic. If set to '0', then this VNIC is not
+ * configured to be operating in dual VNIC mode.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
+ /*
+ * When this flag is set to '1', the VNIC is requested to be configured
+ * to receive only RoCE traffic. If this flag is set to '0', then this
+ * flag shall be ignored by the HWRM. If roce_dual_vnic_mode flag is set
+ * to '1', then the HWRM client shall not set this flag to '1'.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ uint32_t flags;
+
+ /* This bit must be '1' for the dflt_ring_grp field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1)
+ /* This bit must be '1' for the rss_rule field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2)
+ /* This bit must be '1' for the cos_rule field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4)
+ /* This bit must be '1' for the lb_rule field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8)
+ /* This bit must be '1' for the mru field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10)
+ uint32_t enables;
+
+ /* Logical vnic ID */
+ uint16_t vnic_id;
+
+ /*
+ * Default Completion ring for the VNIC. This ring will be chosen if a
+ * packet does not match any RSS rules and if there is no COS rule.
+ */
+ uint16_t dflt_ring_grp;
+
+ /*
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if there is no
+ * RSS rule.
+ */
+ uint16_t rss_rule;
+
+ /*
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if there is no
+ * COS rule.
+ */
+ uint16_t cos_rule;
+
+ /*
+ * RSS ID for load balancing rule/table structure. 0xFF... (All Fs) if
+ * there is no LB rule.
+ */
+ uint16_t lb_rule;
+
+ /*
+ * The maximum receive unit of the vnic. Each vnic is associated with a
+ * function. The vnic mru value overwrites the mru setting of the
+ * associated function. The HWRM shall make sure that vnic mru does not
+ * exceed the mru of the port the function is associated with.
+ */
+ uint16_t mru;
+
+ uint32_t unused_0;
+} __attribute__((packed));
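+
+/*
+ * Illustrative sketch, not part of the original patch: configuring a
+ * VNIC with a default ring group and an MRU while leaving the RSS rule
+ * unset (all Fs, as documented above). Only fields whose 'enables' bit
+ * is set are applied; the HWRM_VNIC_CFG macro is assumed to be defined
+ * earlier in this header.
+ */
+static inline void
+bnxt_example_prep_vnic_cfg(struct hwrm_vnic_cfg_input *req,
+			   uint16_t vnic_id, uint16_t ring_grp, uint16_t mru)
+{
+	memset(req, 0, sizeof(*req));
+	req->req_type = rte_cpu_to_le_16(HWRM_VNIC_CFG);
+	req->enables = rte_cpu_to_le_32(
+		HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
+		HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
+		HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
+	req->vnic_id = rte_cpu_to_le_16(vnic_id);
+	req->dflt_ring_grp = rte_cpu_to_le_16(ring_grp);
+	req->rss_rule = rte_cpu_to_le_16((uint16_t)-1);	/* no RSS rule */
+	req->mru = rte_cpu_to_le_16(mru);
+}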
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vnic_free */
+/*
+ * Description: Free a VNIC resource. Idle any resources associated with the
+ * VNIC as well as the VNIC. Reset and release all resources associated with the
+ * VNIC.
+ */
+
+/* Input (24 bytes) */
+struct hwrm_vnic_free_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vnic_rss_cfg */
+/* Description: This function is used to enable RSS configuration. */
+
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * When this bit is '1', the RSS hash shall be computed over source and
+ * destination IPv4 addresses of IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv4 addresses and source/destination ports of
+ * TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv4 addresses and source/destination ports of
+ * UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over source and
+ * destination IPv6 addresses of IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv6 addresses and source/destination ports of
+ * TCP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv6 addresses and source/destination ports of
+ * UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ uint32_t hash_type;
+
+ uint32_t unused_0;
+
+ /* This is the address for the RSS ring group table. */
+ uint64_t ring_grp_tbl_addr;
+
+ /* This is the address for the RSS hash key table. */
+ uint64_t hash_key_tbl_addr;
+
+ /* Index to the rss indirection table. */
+ uint16_t rss_ctx_idx;
+
+ uint16_t unused_1[3];
+} __attribute__((packed));
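+
+/*
+ * Illustrative sketch, not part of the original patch: enabling 4-tuple
+ * RSS for TCP over IPv4/IPv6 plus plain IP hashing. 'grp_tbl_iova' and
+ * 'key_iova' would be IO addresses of the driver-built ring group table
+ * and hash key; the key length and the HWRM_VNIC_RSS_CFG macro are
+ * assumptions.
+ */
+static inline void
+bnxt_example_prep_rss_cfg(struct hwrm_vnic_rss_cfg_input *req,
+			  uint64_t grp_tbl_iova, uint64_t key_iova,
+			  uint16_t ctx_idx)
+{
+	memset(req, 0, sizeof(*req));
+	req->req_type = rte_cpu_to_le_16(HWRM_VNIC_RSS_CFG);
+	req->hash_type = rte_cpu_to_le_32(
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
+		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6);
+	req->ring_grp_tbl_addr = rte_cpu_to_le_64(grp_tbl_iova);
+	req->hash_key_tbl_addr = rte_cpu_to_le_64(key_iova);
+	req->rss_ctx_idx = rte_cpu_to_le_16(ctx_idx);
+}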
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Description: This function is used to allocate COS/Load Balance context. */
+
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /* rss_cos_lb_ctx_id is 16 bits. */
+ uint16_t rss_cos_lb_ctx_id;
+
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Description: This function can be used to free COS/Load Balance context. */
+/* Input (24 bytes) */
+
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /* rss_cos_lb_ctx_id is 16 bits. */
+ uint16_t rss_cos_lb_ctx_id;
+
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ /* The maximum number of queues that can be configured. */
+ uint8_t max_configurable_queues;
+
+ /* The maximum number of lossless queues that can be configured. */
+ uint8_t max_configurable_lossless_queues;
+
+ /*
+ * 0 - Not allowed. Non-zero - Allowed. If this value is non-zero, then
+ * the HWRM shall allow the host SW driver to configure queues using
+ * hwrm_queue_cfg.
+ */
+ uint8_t queue_cfg_allowed;
+
+ /*
+ * 0 - Not allowed. Non-zero - Allowed. If this value is non-zero, then
+ * the HWRM shall allow the host SW driver to configure queue buffers
+ * using hwrm_queue_buffers_cfg.
+ */
+ uint8_t queue_buffers_cfg_allowed;
+
+ /*
+ * 0 - Not allowed. Non-zero - Allowed. If this value is non-zero, then
+ * the HWRM shall allow the host SW driver to configure PFC using
+ * hwrm_queue_pfcenable_cfg.
+ */
+ uint8_t queue_pfcenable_cfg_allowed;
+
+ /*
+ * 0 - Not allowed. Non-zero - Allowed. If this value is non-zero, then
+ * the HWRM shall allow the host SW driver to configure Priority to CoS
+ * mapping using hwrm_queue_pri2cos_cfg.
+ */
+ uint8_t queue_pri2cos_cfg_allowed;
+
+ /*
+ * 0 - Not allowed. Non-zero - Allowed. If this value is non-zero, then
+ * the HWRM shall allow the host SW driver to configure CoS Bandwidth
+ * configuration using hwrm_queue_cos2bw_cfg.
+ */
+ uint8_t queue_cos2bw_cfg_allowed;
+
+ /* ID of CoS Queue 0. FF - Invalid id */
+ uint8_t queue_id0;
+
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id0_service_profile;
+
+ /* ID of CoS Queue 1. FF - Invalid id */
+ uint8_t queue_id1;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id1_service_profile;
+
+ /* ID of CoS Queue 2. FF - Invalid id */
+ uint8_t queue_id2;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id2_service_profile;
+
+ /* ID of CoS Queue 3. FF - Invalid id */
+ uint8_t queue_id3;
+
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id3_service_profile;
+
+ /* ID of CoS Queue 4. FF - Invalid id */
+ uint8_t queue_id4;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id4_service_profile;
+
+ /* ID of CoS Queue 5. FF - Invalid id */
+ uint8_t queue_id5;
+
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id5_service_profile;
+
+ /* ID of CoS Queue 6. FF - Invalid id */
+ uint8_t queue_id6;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id6_service_profile;
+
+ /* ID of CoS Queue 7. FF - Invalid id */
+ uint8_t queue_id7;
+
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id7_service_profile;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
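+
+/*
+ * Illustrative sketch, not part of the original patch: picking the
+ * first advertised CoS queue with a lossy service profile from a
+ * hwrm_queue_qportcfg response. Only queues 0 and 1 are checked here
+ * for brevity; a real driver would walk all eight queue_idN pairs.
+ */
+static inline int
+bnxt_example_first_lossy_queue(struct hwrm_queue_qportcfg_output *resp)
+{
+	if (resp->queue_id0 != 0xff &&
+	    resp->queue_id0_service_profile ==
+	    HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY)
+		return resp->queue_id0;
+	if (resp->queue_id1 != 0xff &&
+	    resp->queue_id1_service_profile ==
+	    HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY)
+		return resp->queue_id1;
+	return -1;	/* no lossy queue advertised */
+}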
+
+/* hwrm_func_drv_rgtr */
+/*
+ * Description: This command is used by the function driver to register its
+ * information with the HWRM. A function driver shall implement this command. A
+ * function driver shall use this command during the driver initialization right
+ * after the HWRM version discovery and default ring resources allocation.
+ */
+
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function IDs;
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors; 0xFFFF - HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * When this bit is '1', the function driver is requesting all requests
+ * from its child VF drivers to be forwarded to itself. This flag can
+ * only be set by the PF driver. If a VF driver sets this flag, it
+ * should be ignored by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is requesting that none of the
+ * requests from its child VF drivers be forwarded to itself. This flag can
+ * only be set by the PF driver. If a VF driver sets this flag, it
+ * should be ignored by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
+ uint32_t flags;
+
+ /* This bit must be '1' for the os_type field to be configured. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1)
+ /* This bit must be '1' for the ver field to be configured. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2)
+ /* This bit must be '1' for the timestamp field to be configured. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4)
+ /* This bit must be '1' for the vf_req_fwd field to be configured. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the async_event_fwd field to be configured.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \
+ UINT32_C(0x10)
+ uint32_t enables;
+
+ /* This value indicates the type of OS. */
+ /* Unknown */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN \
+ (UINT32_C(0x0) << 0)
+ /* Other OS not listed below. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER \
+ (UINT32_C(0x1) << 0)
+ /* MSDOS OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS \
+ (UINT32_C(0xe) << 0)
+ /* Windows OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS \
+ (UINT32_C(0x12) << 0)
+ /* Solaris OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS \
+ (UINT32_C(0x1d) << 0)
+ /* Linux OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX \
+ (UINT32_C(0x24) << 0)
+ /* FreeBSD OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD \
+ (UINT32_C(0x2a) << 0)
+ /* VMware ESXi OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI \
+ (UINT32_C(0x68) << 0)
+ /* Microsoft Windows 8 64-bit OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 \
+ (UINT32_C(0x73) << 0)
+ /* Microsoft Windows Server 2012 R2 OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 \
+ (UINT32_C(0x74) << 0)
+ uint16_t os_type;
+
+ /* This is the major version of the driver. */
+ uint8_t ver_maj;
+
+ /* This is the minor version of the driver. */
+ uint8_t ver_min;
+
+ /* This is the update version of the driver. */
+ uint8_t ver_upd;
+
+ uint8_t unused_0;
+ uint16_t unused_1;
+
+ /*
+ * This is a 32-bit timestamp provided by the driver for keep alive. The
+ * timestamp is in multiples of 1ms.
+ */
+ uint32_t timestamp;
+
+ uint32_t unused_2;
+
+ /*
+ * This is a 256-bit bit mask provided by the PF driver for letting the
+ * HWRM know what commands issued by the VF driver to the HWRM should be
+ * forwarded to the PF driver. Nth bit refers to the Nth req_type.
+ * Setting Nth bit to 1 indicates that requests from the VF driver with
+ * req_type equal to N shall be forwarded to the parent PF driver. This
+ * field is not valid for the VF driver.
+ */
+ uint32_t vf_req_fwd[8];
+
+ /*
+ * This is a 256-bit bit mask provided by the function driver (PF or VF
+ * driver) to indicate the list of asynchronous event completions to be
+ * forwarded. Nth bit refers to the Nth event_id. Setting Nth bit to 1
+ * by the function driver shall result in the HWRM forwarding
+ * asynchronous event completion with event_id equal to N. If all bits
+ * are set to 0 (value of 0), then the HWRM shall not forward any
+ * asynchronous event completion to this function driver.
+ */
+ uint32_t async_event_fwd[8];
+} __attribute__((packed));
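+
+/*
+ * Illustrative sketch, not part of the original patch: registering a
+ * Linux/DPDK PF driver and asking the HWRM to forward one VF request
+ * type. The bit math follows the rule above: request type N maps to
+ * bit N%32 of word N/32. The HWRM_FUNC_DRV_RGTR macro and the example
+ * version numbers are assumptions.
+ */
+static inline void
+bnxt_example_prep_drv_rgtr(struct hwrm_func_drv_rgtr_input *req,
+			   uint16_t fwd_req_type)
+{
+	memset(req, 0, sizeof(*req));
+	req->req_type = rte_cpu_to_le_16(HWRM_FUNC_DRV_RGTR);
+	req->enables = rte_cpu_to_le_32(
+		HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE |
+		HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
+		HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
+	req->os_type = rte_cpu_to_le_16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX);
+	req->ver_maj = 16;	/* e.g. DPDK 16.07 */
+	req->ver_min = 7;
+	req->ver_upd = 0;
+	/* Forward VF requests of type 'fwd_req_type' to this PF driver. */
+	req->vf_req_fwd[fwd_req_type / 32] |=
+		rte_cpu_to_le_32(UINT32_C(1) << (fwd_req_type % 32));
+}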
+
+/* Output (16 bytes) */
+
+struct hwrm_func_drv_rgtr_output {
+ /*
+ * Pass/Fail or error type. Note: the receiver should verify the input
+ * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+ /* This field returns the type of the original request. */
+ uint16_t req_type;
+
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_func_drv_unrgtr */
+/*
+ * Description: This command is used by the function driver to unregister
+ * with the HWRM. A function driver shall implement this command. A
+ * function driver shall use this command during driver unloading.
+ */
+/* Input (24 bytes) */
+
+struct hwrm_func_drv_unrgtr_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+	 * This value indicates what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+	 * Target ID of this command. 0x0 - 0xFFF8: used for function IDs;
+	 * 0xFFF8 - 0xFFFE: reserved for internal processors; 0xFFFF: HWRM.
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * When this bit is '1', the function driver is notifying the HWRM to
+ * prepare for the shutdown.
+ */
+ #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
+ UINT32_C(0x1)
+ uint32_t flags;
+
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+ /*
+	 * Pass/Fail or error type. Note: the receiver should verify the input
+	 * parameters and fail the call with an error when appropriate.
+ */
+ uint16_t error_code;
+
+	/* This field returns the type of the original request. */
+	uint16_t req_type;
+
+	/* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+
+ /*
+ * This field is the length of the response in bytes. The last byte of
+ * the response is a valid flag that will read as '1' when the command
+ * has been completely written to memory.
+ */
+ uint16_t resp_len;
+
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+#endif
diff --git a/drivers/net/bnxt/rte_pmd_bnxt_version.map b/drivers/net/bnxt/rte_pmd_bnxt_version.map
new file mode 100644
index 00000000..349c6e1c
--- /dev/null
+++ b/drivers/net/bnxt/rte_pmd_bnxt_version.map
@@ -0,0 +1,4 @@
+DPDK_16.04 {
+
+ local: *;
+};
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 10c794c4..504f2e8b 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -64,5 +64,7 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_kvargs
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_cmdline
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ring
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 8b4db507..48a50e4e 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -39,9 +39,12 @@
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_cycles.h>
+#include <rte_compat.h>
#include "rte_eth_bond_private.h"
+static void bond_mode_8023ad_ext_periodic_cb(void *arg);
+
#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
bond_dbg_get_time_diff_ms(), slave_id, \
@@ -512,7 +515,7 @@ mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
if (!ACTOR_STATE(port, SYNCHRONIZATION)) {
/* attach mux to aggregator */
- RTE_VERIFY((port->actor_state & (STATE_COLLECTING |
+ RTE_ASSERT((port->actor_state & (STATE_COLLECTING |
STATE_DISTRIBUTING)) == 0);
ACTOR_STATE_SET(port, SYNCHRONIZATION);
@@ -813,7 +816,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
struct lacpdu_header *lacp;
lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
- RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+ RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
/* This is LACP frame so pass it to rx_machine */
rx_machine(internals, slave_id, &lacp->lacpdu);
@@ -856,8 +859,9 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
uint16_t q_id;
	/* Given slave must not be in the active list */
- RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+ RTE_ASSERT(find_slave_by_id(internals->active_slaves,
internals->active_slave_count, slave_id) == internals->active_slave_count);
+ RTE_SET_USED(internals); /* used only for assert when enabled */
memcpy(&port->actor, &initial, sizeof(struct port_params));
	/* Standard requires that port ID must be greater than 0.
@@ -880,8 +884,8 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
if (port->mbuf_pool != NULL)
return;
- RTE_VERIFY(port->rx_ring == NULL);
- RTE_VERIFY(port->tx_ring == NULL);
+ RTE_ASSERT(port->rx_ring == NULL);
+ RTE_ASSERT(port->tx_ring == NULL);
socket_id = rte_eth_devices[slave_id].data->numa_node;
element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
@@ -939,7 +943,7 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
uint8_t i;
	/* Given slave must be in the active list */
- RTE_VERIFY(find_slave_by_id(internals->active_slaves,
+ RTE_ASSERT(find_slave_by_id(internals->active_slaves,
internals->active_slave_count, slave_id) < internals->active_slave_count);
/* Exclude slave from transmit policy. If this slave is an aggregator
@@ -1004,7 +1008,7 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
bond_mode_8023ad_start(bond_dev);
}
-void
+static void
bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
struct rte_eth_bond_8023ad_conf *conf)
{
@@ -1022,26 +1026,36 @@ bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks;
}
-void
-bond_mode_8023ad_setup(struct rte_eth_dev *dev,
+static void
+bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev,
struct rte_eth_bond_8023ad_conf *conf)
{
- struct rte_eth_bond_8023ad_conf def_conf;
struct bond_dev_private *internals = dev->data->dev_private;
struct mode8023ad_private *mode4 = &internals->mode4;
- uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
- if (conf == NULL) {
- conf = &def_conf;
- conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
- conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
- conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
- conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
- conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
- conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
- conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
- conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
- }
+ bond_mode_8023ad_conf_get(dev, conf);
+ conf->slowrx_cb = mode4->slowrx_cb;
+}
+
+static void
+bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf)
+{
+ conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS;
+ conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS;
+ conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS;
+ conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS;
+ conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS;
+ conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS;
+ conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS;
+ conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS;
+ conf->slowrx_cb = NULL;
+}
+
+static void
+bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ uint64_t ms_ticks = rte_get_tsc_hz() / 1000;
mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks;
mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks;
@@ -1053,6 +1067,48 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev,
mode4->update_timeout_us = conf->update_timeout_ms * 1000;
}
+static void
+bond_mode_8023ad_setup_v1604(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_bond_8023ad_conf def_conf;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ if (conf == NULL) {
+ conf = &def_conf;
+ bond_mode_8023ad_conf_get_default(conf);
+ }
+
+ bond_mode_8023ad_stop(dev);
+ bond_mode_8023ad_conf_assign(mode4, conf);
+
+ if (dev->data->dev_started)
+ bond_mode_8023ad_start(dev);
+}
+
+
+void
+bond_mode_8023ad_setup(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_bond_8023ad_conf def_conf;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ if (conf == NULL) {
+ conf = &def_conf;
+ bond_mode_8023ad_conf_get_default(conf);
+ }
+
+ bond_mode_8023ad_stop(dev);
+ bond_mode_8023ad_conf_assign(mode4, conf);
+ mode4->slowrx_cb = conf->slowrx_cb;
+
+ if (dev->data->dev_started)
+ bond_mode_8023ad_start(dev);
+}
+
int
bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
{
@@ -1068,13 +1124,28 @@ bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
int
bond_mode_8023ad_start(struct rte_eth_dev *bond_dev)
{
- return rte_eal_alarm_set(BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000,
- &bond_mode_8023ad_periodic_cb, bond_dev);
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000;
+
+ if (mode4->slowrx_cb)
+ return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb,
+ bond_dev);
+
+ return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev);
}
void
bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev)
{
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+
+ if (mode4->slowrx_cb) {
+ rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb,
+ bond_dev);
+ return;
+ }
rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev);
}
@@ -1143,7 +1214,7 @@ free_out:
}
int
-rte_eth_bond_8023ad_conf_get(uint8_t port_id,
+rte_eth_bond_8023ad_conf_get_v1604(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf)
{
struct rte_eth_dev *bond_dev;
@@ -1158,9 +1229,10 @@ rte_eth_bond_8023ad_conf_get(uint8_t port_id,
bond_mode_8023ad_conf_get(bond_dev, conf);
return 0;
}
+VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1604, 16.04);
int
-rte_eth_bond_8023ad_setup(uint8_t port_id,
+rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf)
{
struct rte_eth_dev *bond_dev;
@@ -1168,6 +1240,25 @@ rte_eth_bond_8023ad_setup(uint8_t port_id,
if (valid_bonded_port_id(port_id) != 0)
return -EINVAL;
+ if (conf == NULL)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_conf_get_v1607(bond_dev, conf);
+ return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07);
+MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf),
+ rte_eth_bond_8023ad_conf_get_v1607);
+
+static int
+bond_8023ad_setup_validate(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ if (valid_bonded_port_id(port_id) != 0)
+ return -EINVAL;
+
if (conf != NULL) {
/* Basic sanity check */
if (conf->slow_periodic_ms == 0 ||
@@ -1183,11 +1274,47 @@ rte_eth_bond_8023ad_setup(uint8_t port_id,
}
}
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_setup_v1604(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+ int err;
+
+ err = bond_8023ad_setup_validate(port_id, conf);
+ if (err != 0)
+ return err;
+
+ bond_dev = &rte_eth_devices[port_id];
+ bond_mode_8023ad_setup_v1604(bond_dev, conf);
+
+ return 0;
+}
+VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v1604, 16.04);
+
+int
+rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf)
+{
+ struct rte_eth_dev *bond_dev;
+ int err;
+
+ err = bond_8023ad_setup_validate(port_id, conf);
+ if (err != 0)
+ return err;
+
bond_dev = &rte_eth_devices[port_id];
bond_mode_8023ad_setup(bond_dev, conf);
return 0;
}
+BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07);
+MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf),
+ rte_eth_bond_8023ad_setup_v1607);
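
For readers unfamiliar with DPDK symbol versioning, the macros above (from
rte_compat.h) keep the 16.04 ABI alive for existing binaries while making the
16.07 variant the default. A minimal sketch with a hypothetical function foo:

    int foo_v1604(int x);  /* old implementation, kept for the 16.04 ABI */
    int foo_v1607(int x);  /* new implementation */
    VERSION_SYMBOL(foo, _v1604, 16.04);       /* foo@DPDK_16.04 -> foo_v1604 */
    BIND_DEFAULT_SYMBOL(foo, _v1607, 16.07);  /* foo@@DPDK_16.07 -> foo_v1607 */
    MAP_STATIC_SYMBOL(int foo(int x), foo_v1607); /* static builds use v1607 */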
int
rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
@@ -1221,3 +1348,160 @@ rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
info->agg_port_id = port->aggregator_port_id;
return 0;
}
+
+static int
+bond_8023ad_ext_validate(uint8_t port_id, uint8_t slave_id)
+{
+ struct rte_eth_dev *bond_dev;
+ struct bond_dev_private *internals;
+ struct mode8023ad_private *mode4;
+
+ if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD)
+ return -EINVAL;
+
+ bond_dev = &rte_eth_devices[port_id];
+
+ if (!bond_dev->data->dev_started)
+ return -EINVAL;
+
+ internals = bond_dev->data->dev_private;
+ if (find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) ==
+ internals->active_slave_count)
+ return -EINVAL;
+
+ mode4 = &internals->mode4;
+ if (mode4->slowrx_cb == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled)
+{
+ struct port *port;
+ int res;
+
+ res = bond_8023ad_ext_validate(port_id, slave_id);
+ if (res != 0)
+ return res;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ if (enabled)
+ ACTOR_STATE_SET(port, COLLECTING);
+ else
+ ACTOR_STATE_CLR(port, COLLECTING);
+
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled)
+{
+ struct port *port;
+ int res;
+
+ res = bond_8023ad_ext_validate(port_id, slave_id);
+ if (res != 0)
+ return res;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ if (enabled)
+ ACTOR_STATE_SET(port, DISTRIBUTING);
+ else
+ ACTOR_STATE_CLR(port, DISTRIBUTING);
+
+ return 0;
+}
+
+int
+rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id)
+{
+ struct port *port;
+ int err;
+
+ err = bond_8023ad_ext_validate(port_id, slave_id);
+ if (err != 0)
+ return err;
+
+ port = &mode_8023ad_ports[slave_id];
+ return ACTOR_STATE(port, DISTRIBUTING);
+}
+
+int
+rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id)
+{
+ struct port *port;
+ int err;
+
+ err = bond_8023ad_ext_validate(port_id, slave_id);
+ if (err != 0)
+ return err;
+
+ port = &mode_8023ad_ports[slave_id];
+ return ACTOR_STATE(port, COLLECTING);
+}
+
+int
+rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
+ struct rte_mbuf *lacp_pkt)
+{
+ struct port *port;
+ int res;
+
+ res = bond_8023ad_ext_validate(port_id, slave_id);
+ if (res != 0)
+ return res;
+
+ port = &mode_8023ad_ports[slave_id];
+
+ if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
+ return -EINVAL;
+
+ struct lacpdu_header *lacp;
+
+ /* only enqueue LACPDUs */
+ lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *);
+ if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP)
+ return -EINVAL;
+
+ MODE4_DEBUG("sending LACP frame\n");
+
+ return rte_ring_enqueue(port->tx_ring, lacp_pkt);
+}
+
+static void
+bond_mode_8023ad_ext_periodic_cb(void *arg)
+{
+ struct rte_eth_dev *bond_dev = arg;
+ struct bond_dev_private *internals = bond_dev->data->dev_private;
+ struct mode8023ad_private *mode4 = &internals->mode4;
+ struct port *port;
+ void *pkt = NULL;
+ uint16_t i, slave_id;
+
+ for (i = 0; i < internals->active_slave_count; i++) {
+ slave_id = internals->active_slaves[i];
+ port = &mode_8023ad_ports[slave_id];
+
+ if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
+ struct rte_mbuf *lacp_pkt = pkt;
+ struct lacpdu_header *lacp;
+
+ lacp = rte_pktmbuf_mtod(lacp_pkt,
+ struct lacpdu_header *);
+ RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP);
+
+ /* This is LACP frame so pass it to rx callback.
+ * Callback is responsible for freeing mbuf.
+ */
+ mode4->slowrx_cb(slave_id, lacp_pkt);
+ }
+ }
+
+ rte_eal_alarm_set(internals->mode4.update_timeout_us,
+ bond_mode_8023ad_ext_periodic_cb, arg);
+}
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h
index ebd0e934..1de34bc8 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -64,6 +64,9 @@ extern "C" {
#define MARKER_TLV_TYPE_INFO 0x01
#define MARKER_TLV_TYPE_RESP 0x02
+typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint8_t slave_id,
+ struct rte_mbuf *lacp_pkt);
+
enum rte_bond_8023ad_selection {
UNSELECTED,
STANDBY,
@@ -157,6 +160,7 @@ struct rte_eth_bond_8023ad_conf {
uint32_t tx_period_ms;
uint32_t rx_marker_period_ms;
uint32_t update_timeout_ms;
+ rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
};
struct rte_eth_bond_8023ad_slave_info {
@@ -183,6 +187,12 @@ struct rte_eth_bond_8023ad_slave_info {
int
rte_eth_bond_8023ad_conf_get(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
+int
+rte_eth_bond_8023ad_conf_get_v1604(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
+int
+rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
/**
* @internal
@@ -198,6 +208,12 @@ rte_eth_bond_8023ad_conf_get(uint8_t port_id,
int
rte_eth_bond_8023ad_setup(uint8_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
+int
+rte_eth_bond_8023ad_setup_v1604(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
+int
+rte_eth_bond_8023ad_setup_v1607(uint8_t port_id,
+ struct rte_eth_bond_8023ad_conf *conf);
/**
* @internal
@@ -219,4 +235,71 @@ rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
}
#endif
+/**
+ * Enable or disable collecting (COLLECTING actor state) on a slave port.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port id of valid slave.
+ * @param enabled Non-zero when collection enabled.
+ * @return
+ * 0 - if ok
+ * -EINVAL if slave is not valid.
+ */
+int
+rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled);
+
+/**
+ * Get COLLECTING flag from slave port actor state.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port id of valid slave.
+ * @return
+ * 0 - if not set
+ * 1 - if set
+ * -EINVAL if slave is not valid.
+ */
+int
+rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id);
+
+/**
+ * Enable or disable distributing (DISTRIBUTING actor state) on a slave port.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port id of valid slave.
+ * @param enabled Non-zero when distribution enabled.
+ * @return
+ * 0 - if ok
+ * -EINVAL if slave is not valid.
+ */
+int
+rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled);
+
+/**
+ * Get DISTRIBUTING flag from slave port actor state.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port id of valid slave.
+ * @return
+ * 0 - if not set
+ * 1 - if set
+ * -EINVAL if slave is not valid.
+ */
+int
+rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id);
+
+/**
+ * LACPDU transmit path for external 802.3ad state machine. Caller retains
+ * ownership of the packet on failure.
+ *
+ * @param port_id Bonding device id
+ * @param slave_id Port ID of valid slave device.
+ * @param lacp_pkt mbuf containing LACPDU.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
+ struct rte_mbuf *lacp_pkt);
+
#endif /* RTE_ETH_BOND_8023AD_H_ */
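
Taken together, a non-NULL slowrx_cb hands the 802.3ad state machine to the
application: the PMD only shuttles LACPDUs between the wire and the callback,
and the ext_* calls flip actor state by hand. A hedged application-side sketch
(assuming the usual DPDK headers; all names outside this header are
illustrative):

    /* External LACP receive hook; the callback owns and must free the mbuf. */
    static void ext_lacp_rx(uint8_t slave_id, struct rte_mbuf *lacp_pkt)
    {
            /* ... run the external LACP logic here ... */
            rte_pktmbuf_free(lacp_pkt);
    }

    static int enable_external_sm(uint8_t bond_port)
    {
            struct rte_eth_bond_8023ad_conf conf;

            if (rte_eth_bond_8023ad_conf_get(bond_port, &conf) != 0)
                    return -1;
            conf.slowrx_cb = ext_lacp_rx; /* non-NULL selects the external path */
            return rte_eth_bond_8023ad_setup(bond_port, &conf);
    }

    /* Later, once the device is started and a slave is active: */
    /* rte_eth_bond_8023ad_ext_collect(bond_port, slave_id, 1); */
    /* rte_eth_bond_8023ad_ext_distrib(bond_port, slave_id, 1); */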
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
index 8adee70b..ca8858be 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h
@@ -173,6 +173,8 @@ struct mode8023ad_private {
uint64_t tx_period_timeout;
uint64_t rx_marker_timeout;
uint64_t update_timeout_us;
+ rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
+ uint8_t external_sm;
};
/**
@@ -185,18 +187,6 @@ extern struct port mode_8023ad_ports[];
/* Forward declaration */
struct bond_dev_private;
-/**
- * @internal
- *
- * Get configuration of bonded interface.
- *
- *
- * @param dev Bonded interface
- * @param conf returned configuration
- */
-void
-bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
- struct rte_eth_bond_8023ad_conf *conf);
/**
* @internal
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index 3157543e..38f5c4d4 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -90,14 +90,14 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
if (internals->mode6.mempool == NULL) {
RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n",
bond_dev->data->name);
- rte_panic(
- "Failed to allocate memory pool ('%s')\n"
- "for bond device '%s'\n",
- mem_name, bond_dev->data->name);
+ goto mempool_alloc_error;
}
}
return 0;
+
+mempool_alloc_error:
+ return -ENOMEM;
}
void bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset,
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index e9247b5f..203ebe9e 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -60,18 +60,14 @@ check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
int
valid_bonded_port_id(uint8_t port_id)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return -1;
-
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
}
int
valid_slave_port_id(uint8_t port_id)
{
- /* Verify that port id's are valid */
- if (!rte_eth_dev_is_valid_port(port_id))
- return -1;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
/* Verify that port_id refers to a non bonded port */
if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0)
@@ -95,7 +91,7 @@ activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
internals->tlb_slaves_order[active_count] = port_id;
}
- RTE_VERIFY(internals->active_slave_count <
+ RTE_ASSERT(internals->active_slave_count <
(RTE_DIM(internals->active_slaves) - 1));
internals->active_slaves[internals->active_slave_count] = port_id;
@@ -134,7 +130,7 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
sizeof(internals->active_slaves[0]));
}
- RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
+ RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
internals->active_slave_count = active_count;
if (eth_dev->data->dev_started) {
@@ -247,6 +243,8 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
internals->active_slave_count = 0;
internals->rx_offload_capa = 0;
internals->tx_offload_capa = 0;
+ internals->candidate_max_rx_pktlen = 0;
+ internals->max_rx_pktlen = 0;
/* Initially allow to choose any offload type */
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
@@ -331,9 +329,15 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
/* Add slave details to bonded device */
slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
- slave_add(internals, slave_eth_dev);
rte_eth_dev_info_get(slave_port_id, &dev_info);
+ if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
+ RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
+ slave_port_id);
+ return -1;
+ }
+
+ slave_add(internals, slave_eth_dev);
/* We need to store slaves reta_size to be able to synchronize RETA for all
* slave devices even if its sizes are different.
@@ -365,6 +369,9 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
internals->tx_offload_capa = dev_info.tx_offload_capa;
internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;
+ /* Inherit first slave's max rx packet size */
+ internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
+
} else {
/* Check slave link properties are supported if props are set,
* all slaves must be the same */
@@ -391,6 +398,9 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
if (internals->reta_size > dev_info.reta_size)
internals->reta_size = dev_info.reta_size;
+ if (!internals->max_rx_pktlen &&
+ dev_info.max_rx_pktlen < internals->candidate_max_rx_pktlen)
+ internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
}
bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
@@ -536,6 +546,8 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
internals->tx_offload_capa = 0;
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
internals->reta_size = 0;
+ internals->candidate_max_rx_pktlen = 0;
+ internals->max_rx_pktlen = 0;
}
return 0;
}
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 54788cf8..9a2518fb 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1608,11 +1608,11 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
for (i = 0; i < internals->active_slave_count; i++) {
port = &mode_8023ad_ports[internals->active_slaves[i]];
- RTE_VERIFY(port->rx_ring != NULL);
+ RTE_ASSERT(port->rx_ring != NULL);
while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
- RTE_VERIFY(port->tx_ring != NULL);
+ RTE_ASSERT(port->tx_ring != NULL);
while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT)
rte_pktmbuf_free(pkt);
}
@@ -1650,7 +1650,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = (uint32_t)2048;
+ dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
+ internals->candidate_max_rx_pktlen : 2048;
dev_info->max_rx_queues = (uint16_t)128;
dev_info->max_tx_queues = (uint16_t)512;
@@ -1836,7 +1837,6 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->imissed += slave_stats.imissed;
stats->ierrors += slave_stats.ierrors;
stats->oerrors += slave_stats.oerrors;
- stats->imcasts += slave_stats.imcasts;
stats->rx_nombuf += slave_stats.rx_nombuf;
for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
@@ -2294,6 +2294,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
}
}
+ /* set the max_rx_pktlen */
+ internals->max_rx_pktlen = internals->candidate_max_rx_pktlen;
+
/*
* if no kvlist, it means that this bonded device has been created
* through the bonding api.
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 83123978..2bdc9efa 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -169,6 +169,9 @@ struct bond_dev_private {
struct rte_kvargs *kvlist;
uint8_t slave_update_idx;
+
+ uint32_t candidate_max_rx_pktlen;
+ uint32_t max_rx_pktlen;
};
extern const struct eth_dev_ops default_dev_ops;
diff --git a/drivers/net/bonding/rte_eth_bond_version.map b/drivers/net/bonding/rte_eth_bond_version.map
index 22bd9200..2de0a7d3 100644
--- a/drivers/net/bonding/rte_eth_bond_version.map
+++ b/drivers/net/bonding/rte_eth_bond_version.map
@@ -27,3 +27,19 @@ DPDK_2.1 {
rte_eth_bond_free;
} DPDK_2.0;
+
+DPDK_16.04 {
+};
+
+DPDK_16.07 {
+ global:
+
+ rte_eth_bond_8023ad_ext_collect;
+ rte_eth_bond_8023ad_ext_collect_get;
+ rte_eth_bond_8023ad_ext_distrib;
+ rte_eth_bond_8023ad_ext_distrib_get;
+ rte_eth_bond_8023ad_ext_slowtx;
+ rte_eth_bond_8023ad_conf_get;
+ rte_eth_bond_8023ad_setup;
+
+} DPDK_16.04;
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index 07119764..bfcc3159 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -45,7 +45,7 @@ EXPORT_MAP := rte_pmd_cxgbe_version.map
LIBABIVER := 1
-ifeq ($(CC), icc)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
@@ -54,9 +54,11 @@ else
#
# CFLAGS for gcc/clang
#
-ifeq ($(shell test $(CC) = gcc && test $(GCC_VERSION) -ge 44 && echo 1), 1)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS += -Wno-deprecated
endif
+endif
CFLAGS_BASE_DRIVER =
endif
@@ -82,6 +84,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_net lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_net
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index a5225c0e..5e3bd509 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2016 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -318,6 +318,9 @@ struct adapter {
unsigned int mbox; /* associated mailbox */
unsigned int pf; /* associated physical function id */
+ unsigned int vpd_busy;
+ unsigned int vpd_flag;
+
int use_unpacked_mode; /* unpacked rx mode state */
};
@@ -427,6 +430,139 @@ static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
CXGBE_WRITE_REG64(adapter, reg_addr, val);
}
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
+/* Offset of first capability list entry */
+#define PCI_CAPABILITY_LIST 0x34
+#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
+#define PCI_CAP_LIST_ID 0 /* Capability ID */
+#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
+#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */
+#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
+#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
+
+/**
+ * t4_os_pci_write_cfg4 - 32-bit write to PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given register in PCI config space.
+ */
+static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
+ off_t val)
+{
+ u32 val32 = val;
+
+ if (rte_eal_pci_write_config(adapter->pdev, &val32, sizeof(val32),
+ addr) < 0)
+ dev_err(adapter, "Can't write to PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read a 32-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
+ u32 *val)
+{
+ if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_pci_write_cfg2 - 16-bit write to PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: the value to write
+ *
+ * Write a 16-bit value into the given register in PCI config space.
+ */
+static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
+ off_t val)
+{
+ u16 val16 = val;
+
+ if (rte_eal_pci_write_config(adapter->pdev, &val16, sizeof(val16),
+ addr) < 0)
+ dev_err(adapter, "Can't write to PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read a 16-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
+ u16 *val)
+{
+ if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg - read an 8-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read an 8-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
+ u8 *val)
+{
+ if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_find_pci_capability - lookup a capability in the PCI capability list
+ * @adapter: the adapter
+ * @cap: the capability
+ *
+ * Return the address of the given capability within the PCI capability list.
+ */
+static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
+{
+ u16 status;
+ int ttl = 48;
+ u8 pos = 0;
+ u8 id = 0;
+
+ t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status);
+ if (!(status & PCI_STATUS_CAP_LIST)) {
+ dev_err(adapter, "PCIe capability reading failed\n");
+ return -1;
+ }
+
+ t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos);
+ while (ttl-- && pos >= 0x40) {
+ pos &= ~3;
+ t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id);
+
+ if (id == 0xff)
+ break;
+
+ if (id == cap)
+ return (int)pos;
+
+ t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos);
+ }
+ return 0;
+}
+
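Note the return convention of t4_os_find_pci_capability: a positive offset
when the capability is found, 0 when the walk exhausts the list, and -1 when
the device has no capability list at all. A caller sketch mirroring how the
driver uses it later in this patch:

    /* Cache the VPD capability offset once at probe time. */
    int vpd_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

    if (vpd_cap > 0)
            adapter->params.pci.vpd_cap_addr = vpd_cap;
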
/**
* t4_os_set_hw_addr - store a port's MAC address in SW
* @adapter: the adapter
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index cf2e82dd..11f139c9 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2016 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +50,10 @@ enum {
};
enum {
+ T5_REGMAP_SIZE = (332 * 1024),
+};
+
+enum {
MEMWIN0_APERTURE = 2048,
MEMWIN0_BASE = 0x1b800,
};
@@ -398,4 +402,9 @@ int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+unsigned int t4_get_regs_len(struct adapter *adap);
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
+int t4_seeprom_wp(struct adapter *adapter, int enable);
#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 79af8067..7e79adf6 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2016 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -570,6 +570,1031 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
}
/**
+ * t4_get_regs_len - return the size of the chips register set
+ * @adapter: the adapter
+ *
+ * Returns the size of the chip's BAR0 register space.
+ */
+unsigned int t4_get_regs_len(struct adapter *adapter)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ switch (chip_version) {
+ case CHELSIO_T5:
+ return T5_REGMAP_SIZE;
+ }
+
+ dev_err(adapter,
+ "Unsupported chip version %d\n", chip_version);
+ return 0;
+}
+
+/**
+ * t4_get_regs - read chip registers into provided buffer
+ * @adap: the adapter
+ * @buf: register buffer
+ * @buf_size: size (in bytes) of register buffer
+ *
+ * If the provided register buffer isn't large enough for the chip's
+ * full register range, the register dump will be truncated to the
+ * register buffer's size.
+ */
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
+{
+ static const unsigned int t5_reg_ranges[] = {
+ 0x1008, 0x10c0,
+ 0x10cc, 0x10f8,
+ 0x1100, 0x1100,
+ 0x110c, 0x1148,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1280, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3028,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a70,
+ 0x5a80, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x614c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x78fc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d80,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
+ 0x8ea0, 0x8f84,
+ 0x8fc0, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9400, 0x9408,
+ 0x9410, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x96f4,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd004,
+ 0xd010, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0x1106c,
+ 0x11074, 0x11088,
+ 0x1109c, 0x1117c,
+ 0x11190, 0x11204,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c08,
+ 0x19c10, 0x19c60,
+ 0x19c94, 0x19ce4,
+ 0x19cf0, 0x19d40,
+ 0x19d50, 0x19d94,
+ 0x19da0, 0x19de8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e90,
+ 0x19ea0, 0x19f24,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fb4,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30038, 0x30038,
+ 0x30040, 0x30040,
+ 0x30100, 0x30144,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x30828,
+ 0x30834, 0x30834,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309ac,
+ 0x30a00, 0x30a14,
+ 0x30a1c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d00,
+ 0x30d08, 0x30d14,
+ 0x30d1c, 0x30d20,
+ 0x30d3c, 0x30d3c,
+ 0x30d48, 0x30d50,
+ 0x31200, 0x3120c,
+ 0x31220, 0x31220,
+ 0x31240, 0x31240,
+ 0x31600, 0x3160c,
+ 0x31a00, 0x31a1c,
+ 0x31e00, 0x31e20,
+ 0x31e38, 0x31e3c,
+ 0x31e80, 0x31e80,
+ 0x31e88, 0x31ea8,
+ 0x31eb0, 0x31eb4,
+ 0x31ec8, 0x31ed4,
+ 0x31fb8, 0x32004,
+ 0x32200, 0x32200,
+ 0x32208, 0x32240,
+ 0x32248, 0x32280,
+ 0x32288, 0x322c0,
+ 0x322c8, 0x322fc,
+ 0x32600, 0x32630,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b10,
+ 0x32b20, 0x32b30,
+ 0x32b40, 0x32b50,
+ 0x32b60, 0x32b70,
+ 0x33000, 0x33028,
+ 0x33030, 0x33048,
+ 0x33060, 0x33068,
+ 0x33070, 0x3309c,
+ 0x330f0, 0x33128,
+ 0x33130, 0x33148,
+ 0x33160, 0x33168,
+ 0x33170, 0x3319c,
+ 0x331f0, 0x33238,
+ 0x33240, 0x33240,
+ 0x33248, 0x33250,
+ 0x3325c, 0x33264,
+ 0x33270, 0x332b8,
+ 0x332c0, 0x332e4,
+ 0x332f8, 0x33338,
+ 0x33340, 0x33340,
+ 0x33348, 0x33350,
+ 0x3335c, 0x33364,
+ 0x33370, 0x333b8,
+ 0x333c0, 0x333e4,
+ 0x333f8, 0x33428,
+ 0x33430, 0x33448,
+ 0x33460, 0x33468,
+ 0x33470, 0x3349c,
+ 0x334f0, 0x33528,
+ 0x33530, 0x33548,
+ 0x33560, 0x33568,
+ 0x33570, 0x3359c,
+ 0x335f0, 0x33638,
+ 0x33640, 0x33640,
+ 0x33648, 0x33650,
+ 0x3365c, 0x33664,
+ 0x33670, 0x336b8,
+ 0x336c0, 0x336e4,
+ 0x336f8, 0x33738,
+ 0x33740, 0x33740,
+ 0x33748, 0x33750,
+ 0x3375c, 0x33764,
+ 0x33770, 0x337b8,
+ 0x337c0, 0x337e4,
+ 0x337f8, 0x337fc,
+ 0x33814, 0x33814,
+ 0x3382c, 0x3382c,
+ 0x33880, 0x3388c,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x33928,
+ 0x33930, 0x33948,
+ 0x33960, 0x33968,
+ 0x33970, 0x3399c,
+ 0x339f0, 0x33a38,
+ 0x33a40, 0x33a40,
+ 0x33a48, 0x33a50,
+ 0x33a5c, 0x33a64,
+ 0x33a70, 0x33ab8,
+ 0x33ac0, 0x33ae4,
+ 0x33af8, 0x33b10,
+ 0x33b28, 0x33b28,
+ 0x33b3c, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c28, 0x33c28,
+ 0x33c3c, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34038, 0x34038,
+ 0x34040, 0x34040,
+ 0x34100, 0x34144,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x34828,
+ 0x34834, 0x34834,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349ac,
+ 0x34a00, 0x34a14,
+ 0x34a1c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d00,
+ 0x34d08, 0x34d14,
+ 0x34d1c, 0x34d20,
+ 0x34d3c, 0x34d3c,
+ 0x34d48, 0x34d50,
+ 0x35200, 0x3520c,
+ 0x35220, 0x35220,
+ 0x35240, 0x35240,
+ 0x35600, 0x3560c,
+ 0x35a00, 0x35a1c,
+ 0x35e00, 0x35e20,
+ 0x35e38, 0x35e3c,
+ 0x35e80, 0x35e80,
+ 0x35e88, 0x35ea8,
+ 0x35eb0, 0x35eb4,
+ 0x35ec8, 0x35ed4,
+ 0x35fb8, 0x36004,
+ 0x36200, 0x36200,
+ 0x36208, 0x36240,
+ 0x36248, 0x36280,
+ 0x36288, 0x362c0,
+ 0x362c8, 0x362fc,
+ 0x36600, 0x36630,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b10,
+ 0x36b20, 0x36b30,
+ 0x36b40, 0x36b50,
+ 0x36b60, 0x36b70,
+ 0x37000, 0x37028,
+ 0x37030, 0x37048,
+ 0x37060, 0x37068,
+ 0x37070, 0x3709c,
+ 0x370f0, 0x37128,
+ 0x37130, 0x37148,
+ 0x37160, 0x37168,
+ 0x37170, 0x3719c,
+ 0x371f0, 0x37238,
+ 0x37240, 0x37240,
+ 0x37248, 0x37250,
+ 0x3725c, 0x37264,
+ 0x37270, 0x372b8,
+ 0x372c0, 0x372e4,
+ 0x372f8, 0x37338,
+ 0x37340, 0x37340,
+ 0x37348, 0x37350,
+ 0x3735c, 0x37364,
+ 0x37370, 0x373b8,
+ 0x373c0, 0x373e4,
+ 0x373f8, 0x37428,
+ 0x37430, 0x37448,
+ 0x37460, 0x37468,
+ 0x37470, 0x3749c,
+ 0x374f0, 0x37528,
+ 0x37530, 0x37548,
+ 0x37560, 0x37568,
+ 0x37570, 0x3759c,
+ 0x375f0, 0x37638,
+ 0x37640, 0x37640,
+ 0x37648, 0x37650,
+ 0x3765c, 0x37664,
+ 0x37670, 0x376b8,
+ 0x376c0, 0x376e4,
+ 0x376f8, 0x37738,
+ 0x37740, 0x37740,
+ 0x37748, 0x37750,
+ 0x3775c, 0x37764,
+ 0x37770, 0x377b8,
+ 0x377c0, 0x377e4,
+ 0x377f8, 0x377fc,
+ 0x37814, 0x37814,
+ 0x3782c, 0x3782c,
+ 0x37880, 0x3788c,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x37928,
+ 0x37930, 0x37948,
+ 0x37960, 0x37968,
+ 0x37970, 0x3799c,
+ 0x379f0, 0x37a38,
+ 0x37a40, 0x37a40,
+ 0x37a48, 0x37a50,
+ 0x37a5c, 0x37a64,
+ 0x37a70, 0x37ab8,
+ 0x37ac0, 0x37ae4,
+ 0x37af8, 0x37b10,
+ 0x37b28, 0x37b28,
+ 0x37b3c, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c28, 0x37c28,
+ 0x37c3c, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x38000, 0x38030,
+ 0x38038, 0x38038,
+ 0x38040, 0x38040,
+ 0x38100, 0x38144,
+ 0x38190, 0x381a0,
+ 0x381a8, 0x381b8,
+ 0x381c4, 0x381c8,
+ 0x381d0, 0x381d0,
+ 0x38200, 0x38318,
+ 0x38400, 0x384b4,
+ 0x384c0, 0x3852c,
+ 0x38540, 0x3861c,
+ 0x38800, 0x38828,
+ 0x38834, 0x38834,
+ 0x388c0, 0x38908,
+ 0x38910, 0x389ac,
+ 0x38a00, 0x38a14,
+ 0x38a1c, 0x38a2c,
+ 0x38a44, 0x38a50,
+ 0x38a74, 0x38a74,
+ 0x38a7c, 0x38afc,
+ 0x38b08, 0x38c24,
+ 0x38d00, 0x38d00,
+ 0x38d08, 0x38d14,
+ 0x38d1c, 0x38d20,
+ 0x38d3c, 0x38d3c,
+ 0x38d48, 0x38d50,
+ 0x39200, 0x3920c,
+ 0x39220, 0x39220,
+ 0x39240, 0x39240,
+ 0x39600, 0x3960c,
+ 0x39a00, 0x39a1c,
+ 0x39e00, 0x39e20,
+ 0x39e38, 0x39e3c,
+ 0x39e80, 0x39e80,
+ 0x39e88, 0x39ea8,
+ 0x39eb0, 0x39eb4,
+ 0x39ec8, 0x39ed4,
+ 0x39fb8, 0x3a004,
+ 0x3a200, 0x3a200,
+ 0x3a208, 0x3a240,
+ 0x3a248, 0x3a280,
+ 0x3a288, 0x3a2c0,
+ 0x3a2c8, 0x3a2fc,
+ 0x3a600, 0x3a630,
+ 0x3aa00, 0x3aabc,
+ 0x3ab00, 0x3ab10,
+ 0x3ab20, 0x3ab30,
+ 0x3ab40, 0x3ab50,
+ 0x3ab60, 0x3ab70,
+ 0x3b000, 0x3b028,
+ 0x3b030, 0x3b048,
+ 0x3b060, 0x3b068,
+ 0x3b070, 0x3b09c,
+ 0x3b0f0, 0x3b128,
+ 0x3b130, 0x3b148,
+ 0x3b160, 0x3b168,
+ 0x3b170, 0x3b19c,
+ 0x3b1f0, 0x3b238,
+ 0x3b240, 0x3b240,
+ 0x3b248, 0x3b250,
+ 0x3b25c, 0x3b264,
+ 0x3b270, 0x3b2b8,
+ 0x3b2c0, 0x3b2e4,
+ 0x3b2f8, 0x3b338,
+ 0x3b340, 0x3b340,
+ 0x3b348, 0x3b350,
+ 0x3b35c, 0x3b364,
+ 0x3b370, 0x3b3b8,
+ 0x3b3c0, 0x3b3e4,
+ 0x3b3f8, 0x3b428,
+ 0x3b430, 0x3b448,
+ 0x3b460, 0x3b468,
+ 0x3b470, 0x3b49c,
+ 0x3b4f0, 0x3b528,
+ 0x3b530, 0x3b548,
+ 0x3b560, 0x3b568,
+ 0x3b570, 0x3b59c,
+ 0x3b5f0, 0x3b638,
+ 0x3b640, 0x3b640,
+ 0x3b648, 0x3b650,
+ 0x3b65c, 0x3b664,
+ 0x3b670, 0x3b6b8,
+ 0x3b6c0, 0x3b6e4,
+ 0x3b6f8, 0x3b738,
+ 0x3b740, 0x3b740,
+ 0x3b748, 0x3b750,
+ 0x3b75c, 0x3b764,
+ 0x3b770, 0x3b7b8,
+ 0x3b7c0, 0x3b7e4,
+ 0x3b7f8, 0x3b7fc,
+ 0x3b814, 0x3b814,
+ 0x3b82c, 0x3b82c,
+ 0x3b880, 0x3b88c,
+ 0x3b8e8, 0x3b8ec,
+ 0x3b900, 0x3b928,
+ 0x3b930, 0x3b948,
+ 0x3b960, 0x3b968,
+ 0x3b970, 0x3b99c,
+ 0x3b9f0, 0x3ba38,
+ 0x3ba40, 0x3ba40,
+ 0x3ba48, 0x3ba50,
+ 0x3ba5c, 0x3ba64,
+ 0x3ba70, 0x3bab8,
+ 0x3bac0, 0x3bae4,
+ 0x3baf8, 0x3bb10,
+ 0x3bb28, 0x3bb28,
+ 0x3bb3c, 0x3bb50,
+ 0x3bbf0, 0x3bc10,
+ 0x3bc28, 0x3bc28,
+ 0x3bc3c, 0x3bc50,
+ 0x3bcf0, 0x3bcfc,
+ 0x3c000, 0x3c030,
+ 0x3c038, 0x3c038,
+ 0x3c040, 0x3c040,
+ 0x3c100, 0x3c144,
+ 0x3c190, 0x3c1a0,
+ 0x3c1a8, 0x3c1b8,
+ 0x3c1c4, 0x3c1c8,
+ 0x3c1d0, 0x3c1d0,
+ 0x3c200, 0x3c318,
+ 0x3c400, 0x3c4b4,
+ 0x3c4c0, 0x3c52c,
+ 0x3c540, 0x3c61c,
+ 0x3c800, 0x3c828,
+ 0x3c834, 0x3c834,
+ 0x3c8c0, 0x3c908,
+ 0x3c910, 0x3c9ac,
+ 0x3ca00, 0x3ca14,
+ 0x3ca1c, 0x3ca2c,
+ 0x3ca44, 0x3ca50,
+ 0x3ca74, 0x3ca74,
+ 0x3ca7c, 0x3cafc,
+ 0x3cb08, 0x3cc24,
+ 0x3cd00, 0x3cd00,
+ 0x3cd08, 0x3cd14,
+ 0x3cd1c, 0x3cd20,
+ 0x3cd3c, 0x3cd3c,
+ 0x3cd48, 0x3cd50,
+ 0x3d200, 0x3d20c,
+ 0x3d220, 0x3d220,
+ 0x3d240, 0x3d240,
+ 0x3d600, 0x3d60c,
+ 0x3da00, 0x3da1c,
+ 0x3de00, 0x3de20,
+ 0x3de38, 0x3de3c,
+ 0x3de80, 0x3de80,
+ 0x3de88, 0x3dea8,
+ 0x3deb0, 0x3deb4,
+ 0x3dec8, 0x3ded4,
+ 0x3dfb8, 0x3e004,
+ 0x3e200, 0x3e200,
+ 0x3e208, 0x3e240,
+ 0x3e248, 0x3e280,
+ 0x3e288, 0x3e2c0,
+ 0x3e2c8, 0x3e2fc,
+ 0x3e600, 0x3e630,
+ 0x3ea00, 0x3eabc,
+ 0x3eb00, 0x3eb10,
+ 0x3eb20, 0x3eb30,
+ 0x3eb40, 0x3eb50,
+ 0x3eb60, 0x3eb70,
+ 0x3f000, 0x3f028,
+ 0x3f030, 0x3f048,
+ 0x3f060, 0x3f068,
+ 0x3f070, 0x3f09c,
+ 0x3f0f0, 0x3f128,
+ 0x3f130, 0x3f148,
+ 0x3f160, 0x3f168,
+ 0x3f170, 0x3f19c,
+ 0x3f1f0, 0x3f238,
+ 0x3f240, 0x3f240,
+ 0x3f248, 0x3f250,
+ 0x3f25c, 0x3f264,
+ 0x3f270, 0x3f2b8,
+ 0x3f2c0, 0x3f2e4,
+ 0x3f2f8, 0x3f338,
+ 0x3f340, 0x3f340,
+ 0x3f348, 0x3f350,
+ 0x3f35c, 0x3f364,
+ 0x3f370, 0x3f3b8,
+ 0x3f3c0, 0x3f3e4,
+ 0x3f3f8, 0x3f428,
+ 0x3f430, 0x3f448,
+ 0x3f460, 0x3f468,
+ 0x3f470, 0x3f49c,
+ 0x3f4f0, 0x3f528,
+ 0x3f530, 0x3f548,
+ 0x3f560, 0x3f568,
+ 0x3f570, 0x3f59c,
+ 0x3f5f0, 0x3f638,
+ 0x3f640, 0x3f640,
+ 0x3f648, 0x3f650,
+ 0x3f65c, 0x3f664,
+ 0x3f670, 0x3f6b8,
+ 0x3f6c0, 0x3f6e4,
+ 0x3f6f8, 0x3f738,
+ 0x3f740, 0x3f740,
+ 0x3f748, 0x3f750,
+ 0x3f75c, 0x3f764,
+ 0x3f770, 0x3f7b8,
+ 0x3f7c0, 0x3f7e4,
+ 0x3f7f8, 0x3f7fc,
+ 0x3f814, 0x3f814,
+ 0x3f82c, 0x3f82c,
+ 0x3f880, 0x3f88c,
+ 0x3f8e8, 0x3f8ec,
+ 0x3f900, 0x3f928,
+ 0x3f930, 0x3f948,
+ 0x3f960, 0x3f968,
+ 0x3f970, 0x3f99c,
+ 0x3f9f0, 0x3fa38,
+ 0x3fa40, 0x3fa40,
+ 0x3fa48, 0x3fa50,
+ 0x3fa5c, 0x3fa64,
+ 0x3fa70, 0x3fab8,
+ 0x3fac0, 0x3fae4,
+ 0x3faf8, 0x3fb10,
+ 0x3fb28, 0x3fb28,
+ 0x3fb3c, 0x3fb50,
+ 0x3fbf0, 0x3fc10,
+ 0x3fc28, 0x3fc28,
+ 0x3fc3c, 0x3fc50,
+ 0x3fcf0, 0x3fcfc,
+ 0x40000, 0x4000c,
+ 0x40040, 0x40050,
+ 0x40060, 0x40068,
+ 0x4007c, 0x4008c,
+ 0x40094, 0x400b0,
+ 0x400c0, 0x40144,
+ 0x40180, 0x4018c,
+ 0x40200, 0x40254,
+ 0x40260, 0x40264,
+ 0x40270, 0x40288,
+ 0x40290, 0x40298,
+ 0x402ac, 0x402c8,
+ 0x402d0, 0x402e0,
+ 0x402f0, 0x402f0,
+ 0x40300, 0x4033c,
+ 0x403f8, 0x403fc,
+ 0x41304, 0x413c4,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x44054,
+ 0x4405c, 0x44078,
+ 0x440c0, 0x44174,
+ 0x44180, 0x441ac,
+ 0x441b4, 0x441b8,
+ 0x441c0, 0x44254,
+ 0x4425c, 0x44278,
+ 0x442c0, 0x44374,
+ 0x44380, 0x443ac,
+ 0x443b4, 0x443b8,
+ 0x443c0, 0x44454,
+ 0x4445c, 0x44478,
+ 0x444c0, 0x44574,
+ 0x44580, 0x445ac,
+ 0x445b4, 0x445b8,
+ 0x445c0, 0x44654,
+ 0x4465c, 0x44678,
+ 0x446c0, 0x44774,
+ 0x44780, 0x447ac,
+ 0x447b4, 0x447b8,
+ 0x447c0, 0x44854,
+ 0x4485c, 0x44878,
+ 0x448c0, 0x44974,
+ 0x44980, 0x449ac,
+ 0x449b4, 0x449b8,
+ 0x449c0, 0x449fc,
+ 0x45000, 0x45004,
+ 0x45010, 0x45030,
+ 0x45040, 0x45060,
+ 0x45068, 0x45068,
+ 0x45080, 0x45084,
+ 0x450a0, 0x450b0,
+ 0x45200, 0x45204,
+ 0x45210, 0x45230,
+ 0x45240, 0x45260,
+ 0x45268, 0x45268,
+ 0x45280, 0x45284,
+ 0x452a0, 0x452b0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x48000, 0x4800c,
+ 0x48040, 0x48050,
+ 0x48060, 0x48068,
+ 0x4807c, 0x4808c,
+ 0x48094, 0x480b0,
+ 0x480c0, 0x48144,
+ 0x48180, 0x4818c,
+ 0x48200, 0x48254,
+ 0x48260, 0x48264,
+ 0x48270, 0x48288,
+ 0x48290, 0x48298,
+ 0x482ac, 0x482c8,
+ 0x482d0, 0x482e0,
+ 0x482f0, 0x482f0,
+ 0x48300, 0x4833c,
+ 0x483f8, 0x483fc,
+ 0x49304, 0x493c4,
+ 0x49400, 0x4940c,
+ 0x49414, 0x4941c,
+ 0x49480, 0x494d0,
+ 0x4c000, 0x4c054,
+ 0x4c05c, 0x4c078,
+ 0x4c0c0, 0x4c174,
+ 0x4c180, 0x4c1ac,
+ 0x4c1b4, 0x4c1b8,
+ 0x4c1c0, 0x4c254,
+ 0x4c25c, 0x4c278,
+ 0x4c2c0, 0x4c374,
+ 0x4c380, 0x4c3ac,
+ 0x4c3b4, 0x4c3b8,
+ 0x4c3c0, 0x4c454,
+ 0x4c45c, 0x4c478,
+ 0x4c4c0, 0x4c574,
+ 0x4c580, 0x4c5ac,
+ 0x4c5b4, 0x4c5b8,
+ 0x4c5c0, 0x4c654,
+ 0x4c65c, 0x4c678,
+ 0x4c6c0, 0x4c774,
+ 0x4c780, 0x4c7ac,
+ 0x4c7b4, 0x4c7b8,
+ 0x4c7c0, 0x4c854,
+ 0x4c85c, 0x4c878,
+ 0x4c8c0, 0x4c974,
+ 0x4c980, 0x4c9ac,
+ 0x4c9b4, 0x4c9b8,
+ 0x4c9c0, 0x4c9fc,
+ 0x4d000, 0x4d004,
+ 0x4d010, 0x4d030,
+ 0x4d040, 0x4d060,
+ 0x4d068, 0x4d068,
+ 0x4d080, 0x4d084,
+ 0x4d0a0, 0x4d0b0,
+ 0x4d200, 0x4d204,
+ 0x4d210, 0x4d230,
+ 0x4d240, 0x4d260,
+ 0x4d268, 0x4d268,
+ 0x4d280, 0x4d284,
+ 0x4d2a0, 0x4d2b0,
+ 0x4e0c0, 0x4e0e4,
+ 0x4f000, 0x4f03c,
+ 0x4f044, 0x4f08c,
+ 0x4f200, 0x4f250,
+ 0x4f400, 0x4f408,
+ 0x4f414, 0x4f420,
+ 0x4f600, 0x4f618,
+ 0x4f800, 0x4f814,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x5101c,
+ 0x51300, 0x51308,
+ };
+
+ u32 *buf_end = (u32 *)((char *)buf + buf_size);
+ const unsigned int *reg_ranges;
+ int reg_ranges_size, range;
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /* Select the right set of register ranges to dump depending on the
+ * adapter chip type.
+ */
+ switch (chip_version) {
+ case CHELSIO_T5:
+ reg_ranges = t5_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+ break;
+
+ default:
+ dev_err(adap,
+ "Unsupported chip version %d\n", chip_version);
+ return;
+ }
+
+ /* Clear the register buffer and insert the appropriate register
+ * values selected by the above register ranges.
+ */
+ memset(buf, 0, buf_size);
+ for (range = 0; range < reg_ranges_size; range += 2) {
+ unsigned int reg = reg_ranges[range];
+ unsigned int last_reg = reg_ranges[range + 1];
+ u32 *bufp = (u32 *)((char *)buf + reg);
+
+ /* Iterate across the register range filling in the register
+ * buffer but don't write past the end of the register buffer.
+ */
+ while (reg <= last_reg && bufp < buf_end) {
+ *bufp++ = t4_read_reg(adap, reg);
+ reg += sizeof(u32);
+ }
+ }
+}
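
A usage sketch pairing the two register-dump helpers (the buffer management
via rte_malloc.h is illustrative, not part of this patch):

    /* Snapshot the adapter's BAR0 register space. */
    unsigned int len = t4_get_regs_len(adap);

    if (len != 0) {
            void *regs = rte_zmalloc(NULL, len, 0);

            if (regs != NULL) {
                    t4_get_regs(adap, regs, len);
                    /* ... hand the snapshot to the requester ... */
                    rte_free(regs);
            }
    }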
+
+/* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
+#define EEPROM_DELAY 10 /* 10us per poll spin */
+#define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
+
+#define EEPROM_STAT_ADDR 0x7bfc
+
+/**
+ * Small utility function to wait until any outstanding VPD Access is complete.
+ * We have a per-adapter state variable "VPD Busy" to indicate when we have a
+ * VPD Access in flight. This allows us to handle the problem of having a
+ * previous VPD Access time out and prevent an attempt to inject a new VPD
+ * Request before any in-flight VPD request has completed.
+ */
+static int t4_seeprom_wait(struct adapter *adapter)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int max_poll;
+
+ /* If no VPD Access is in flight, we can just return success right
+ * away.
+ */
+ if (!adapter->vpd_busy)
+ return 0;
+
+ /* Poll the VPD Capability Address/Flag register waiting for it
+ * to indicate that the operation is complete.
+ */
+ max_poll = EEPROM_MAX_POLL;
+ do {
+ u16 val;
+
+ udelay(EEPROM_DELAY);
+ t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
+
+ /* If the operation is complete, mark the VPD as no longer
+ * busy and return success.
+ */
+ if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
+ adapter->vpd_busy = 0;
+ return 0;
+ }
+ } while (--max_poll);
+
+ /* Failure! Note that we leave the VPD Busy status set in order to
+	 * avoid pushing a new VPD Access request into the VPD Capability until
+ * the current operation eventually succeeds. It's a bug to issue a
+ * new request when an existing request is in flight and will result
+ * in corrupt hardware state.
+ */
+ return -ETIMEDOUT;
+}
+
+/**
+ * t4_seeprom_read - read a serial EEPROM location
+ * @adapter: adapter to read
+ * @addr: EEPROM virtual address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int ret;
+
+	/* VPD Accesses must always be 4-byte aligned!
+ */
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ /* Wait for any previous operation which may still be in flight to
+ * complete.
+ */
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD still busy from previous operation\n");
+ return ret;
+ }
+
+ /* Issue our new VPD Read request, mark the VPD as being busy and wait
+ * for our request to complete. If it doesn't complete, note the
+ * error and return it to our caller. Note that we do not reset the
+ * VPD Busy status!
+ */
+ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
+ adapter->vpd_busy = 1;
+ adapter->vpd_flag = PCI_VPD_ADDR_F;
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD read of address %#x failed\n", addr);
+ return ret;
+ }
+
+ /* Grab the returned data, swizzle it into our endianness and
+ * return success.
+ */
+ t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*data);
+ return 0;
+}
+
+/**
+ * t4_seeprom_write - write a serial EEPROM location
+ * @adapter: adapter to write
+ * @addr: virtual EEPROM address
+ * @data: value to write
+ *
+ * Write a 32-bit word to a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int ret;
+ u32 stats_reg;
+ int max_poll;
+
+	/* VPD Accesses must always be 4-byte aligned!
+ */
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ /* Wait for any previous operation which may still be in flight to
+ * complete.
+ */
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD still busy from previous operation\n");
+ return ret;
+ }
+
+	/* Issue our new VPD Write request, mark the VPD as being busy and wait
+ * for our request to complete. If it doesn't complete, note the
+ * error and return it to our caller. Note that we do not reset the
+ * VPD Busy status!
+ */
+ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
+ cpu_to_le32(data));
+ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
+ (u16)addr | PCI_VPD_ADDR_F);
+ adapter->vpd_busy = 1;
+ adapter->vpd_flag = 0;
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD write of address %#x failed\n", addr);
+ return ret;
+ }
+
+ /* Reset PCI_VPD_DATA register after a transaction and wait for our
+ * request to complete. If it doesn't complete, return error.
+ */
+ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
+ max_poll = EEPROM_MAX_POLL;
+ do {
+ udelay(EEPROM_DELAY);
+ t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
+ } while ((stats_reg & 0x1) && --max_poll);
+ if (!max_poll)
+ return -ETIMEDOUT;
+
+ /* Return success! */
+ return 0;
+}
+
+/**
+ * t4_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: whether to enable or disable write protection
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t4_seeprom_wp(struct adapter *adapter, int enable)
+{
+ return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
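A hedged usage sketch (the helper name is hypothetical) of how writes are normally bracketed by write-protect toggles, mirroring what cxgbe_set_eeprom() does further down in this patch:

static int example_write_word(struct adapter *adap, u32 vaddr, u32 val)
{
	int ret = t4_seeprom_wp(adap, 0);	/* drop write protection */

	if (ret)
		return ret;
	ret = t4_seeprom_write(adap, vaddr, val);
	if (ret)
		return ret;
	return t4_seeprom_wp(adap, 1);		/* restore write protection */
}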
+
+/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
* @mbox: mbox to use for the FW command
@@ -2326,6 +3351,21 @@ int t4_get_flash_params(struct adapter *adapter)
return 0;
}
+static void set_pcie_completion_timeout(struct adapter *adapter,
+ u8 range)
+{
+ u32 pcie_cap;
+ u16 val;
+
+ pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= 0xfff0;
+ val |= range;
+ t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
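For context, the low four bits of PCI_EXP_DEVCTL2 written above select a PCIe completion timeout range. The values below are taken from the PCIe specification as I understand it; the enum itself is illustrative and not part of the driver:

/* Selected Completion Timeout encodings for Device Control 2, bits [3:0]. */
enum example_pcie_cto_encoding {
	EXAMPLE_CTO_DEFAULT = 0x0,	/* 50 us - 50 ms (default)        */
	EXAMPLE_CTO_1S_3_5S = 0xa,	/* 1 s - 3.5 s                    */
	EXAMPLE_CTO_4S_13S  = 0xd,	/* 4 s - 13 s, used by this patch */
};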
+
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -2369,6 +3409,9 @@ int t4_prep_adapter(struct adapter *adapter)
return -EINVAL;
}
+ adapter->params.pci.vpd_cap_addr =
+ t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+
ret = t4_get_flash_params(adapter);
if (ret < 0)
return ret;
@@ -2384,6 +3427,8 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
+ /* Set the PCIe completion timeout to 4 seconds. */
+ set_pcie_completion_timeout(adapter, 0xd);
return 0;
}
diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h
index bf623cf4..5e62c417 100644
--- a/drivers/net/cxgbe/base/t4_hw.h
+++ b/drivers/net/cxgbe/base/t4_hw.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2016 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,6 +36,9 @@
enum {
NCHAN = 4, /* # of HW channels */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
MBOX_LEN = 64, /* mailbox size in bytes */
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index bb134e50..6c130ed2 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -656,7 +656,6 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
/* RX Stats */
eth_stats->ipackets = ps.rx_frames;
eth_stats->ibytes = ps.rx_octets;
- eth_stats->imcasts = ps.rx_mcast_frames;
eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
ps.rx_ovflow2 + ps.rx_ovflow3 +
ps.rx_trunc0 + ps.rx_trunc1 +
@@ -782,6 +781,168 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
+static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+ return EEPROMSIZE;
+}
+
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
+ */
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+ fn *= sz;
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return fn + phys_addr - 1024;
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024 - fn;
+ if (phys_addr < EEPROMVSIZE)
+ return phys_addr - 1024;
+ return -EINVAL;
+}
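A few worked values may make the translation above easier to follow (fn = 4 and sz = EEPROMPFSIZE = 1024 are assumed purely for illustration, so A = 4K):

/* Illustrative results of eeprom_ptov(phys_addr, 4, 1024):
 *
 *   eeprom_ptov(0,     4, 1024) == 31744   first 1K -> [31K..32K)
 *   eeprom_ptov(8192,  4, 1024) == 3072    8K - 1K - A   (third branch)
 *   eeprom_ptov(20480, 4, 1024) == 19456   20K - 1K      (fourth branch)
 */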
+
+/* The next two routines implement eeprom read/write from physical addresses.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = t4_seeprom_read(adap, vaddr, v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = t4_seeprom_write(adap, vaddr, v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *e)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ u32 i, err = 0;
+ u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
+
+ if (!buf)
+ return -ENOMEM;
+
+ e->magic = EEPROM_MAGIC;
+ for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
+ err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+ if (!err)
+ rte_memcpy(e->data, buf + e->offset, e->length);
+ rte_free(buf);
+ return err;
+}
+
+static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ u8 *buf;
+ int err = 0;
+ u32 aligned_offset, aligned_len, *p;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;
+
+ if (adapter->pf > 0) {
+ u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
+ /* RMW possibly needed for first or last words.
+ */
+ buf = rte_zmalloc(NULL, aligned_len, 0);
+ if (!buf)
+ return -ENOMEM;
+ err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+ if (!err && aligned_len > 4)
+ err = eeprom_rd_phys(adapter,
+ aligned_offset + aligned_len - 4,
+ (u32 *)&buf[aligned_len - 4]);
+ if (err)
+ goto out;
+ rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
+ eeprom->length);
+ } else {
+ buf = eeprom->data;
+ }
+
+ err = t4_seeprom_wp(adapter, false);
+ if (err)
+ goto out;
+
+ for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+ err = eeprom_wr_phys(adapter, aligned_offset, *p);
+ aligned_offset += 4;
+ }
+
+ if (!err)
+ err = t4_seeprom_wp(adapter, true);
+out:
+ if (buf != eeprom->data)
+ rte_free(buf);
+ return err;
+}
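A small worked example of the alignment math above, with illustrative numbers only:

/* Hypothetical request with eeprom->offset = 6 and eeprom->length = 5:
 *
 *   aligned_offset = 6 & ~3                 = 4
 *   aligned_len    = (5 + (6 & 3) + 3) & ~3 = 8
 *
 * The words at offsets 4 and 8 are read back, the 5 payload bytes are
 * merged in at buf + (6 & 3), and both words are written out again.
 */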
+
+static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ return t4_get_regs_len(adapter) / sizeof(uint32_t);
+}
+
+static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+
+ regs->length = cxgbe_get_regs_len(eth_dev);
+ regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
+ (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
+ (1 << 16);
+ t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
+
+ return 0;
+}
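A hedged decoding of the version word assembled above (the field descriptions are mine, not from the source):

/* regs->version bit layout, as composed above:
 *   bits [9:0]  CHELSIO_CHIP_VERSION(adapter->params.chip)
 *   bits 10+    CHELSIO_CHIP_RELEASE(adapter->params.chip), shifted left by 10
 *   bit  16     constant 1, marking this register-dump layout
 */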
+
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_start = cxgbe_dev_start,
.dev_stop = cxgbe_dev_stop,
@@ -807,6 +968,11 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.stats_reset = cxgbe_dev_stats_reset,
.flow_ctrl_get = cxgbe_flow_ctrl_get,
.flow_ctrl_set = cxgbe_flow_ctrl_set,
+ .get_eeprom_length = cxgbe_get_eeprom_length,
+ .get_eeprom = cxgbe_get_eeprom,
+ .set_eeprom = cxgbe_set_eeprom,
+ .get_reg_length = cxgbe_get_regs_len,
+ .get_reg = cxgbe_get_regs,
};
/*
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index f4879e67..57a60f0f 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -43,21 +43,23 @@ EXPORT_MAP := rte_pmd_e1000_version.map
LIBABIVER := 1
-ifeq ($(CC), icc)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259
else
#
-# CFLAGS for gcc
+# CFLAGS for gcc/clang
#
CFLAGS_BASE_DRIVER = -Wno-uninitialized -Wno-unused-parameter
CFLAGS_BASE_DRIVER += -Wno-unused-variable
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
CFLAGS_BASE_DRIVER += -Wno-misleading-indentation
endif
endif
+endif
#
# Add extra flags for base driver files (also known as shared code)
diff --git a/drivers/net/e1000/base/e1000_phy.c b/drivers/net/e1000/base/e1000_phy.c
index d43b7ce0..33f478b1 100644
--- a/drivers/net/e1000/base/e1000_phy.c
+++ b/drivers/net/e1000/base/e1000_phy.c
@@ -4153,12 +4153,13 @@ s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
*data = E1000_READ_REG(hw, E1000_MPHY_DATA);
/* Disable access to mPHY if it was originally disabled */
- if (locked)
+ if (locked) {
ready = e1000_is_mphy_ready(hw);
if (!ready)
return -E1000_ERR_PHY;
E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
E1000_MPHY_DIS_ACCESS);
+ }
return E1000_SUCCESS;
}
@@ -4218,12 +4219,13 @@ s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
/* Disable access to mPHY if it was originally disabled */
- if (locked)
+ if (locked) {
ready = e1000_is_mphy_ready(hw);
if (!ready)
return -E1000_ERR_PHY;
E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
E1000_MPHY_DIS_ACCESS);
+ }
return E1000_SUCCESS;
}
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 441ccad8..6d8750a8 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -78,16 +78,6 @@
#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
@@ -639,7 +629,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
uint64_t pkt_flags;
/* Check if VLAN present */
- pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0);
+ pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
return pkt_flags;
}
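Since 16.07 splits the VLAN indication into presence and stripping, a consumer can key off the new flag; a minimal hypothetical helper, assuming the 16.07 mbuf API:

#include <rte_mbuf.h>

static inline uint16_t example_stripped_tci(const struct rte_mbuf *m)
{
	/* Hypothetical helper: vlan_tci is only meaningful when the PMD
	 * set PKT_RX_VLAN_STRIPPED, as the flag computation above now does.
	 */
	return (m->ol_flags & PKT_RX_VLAN_STRIPPED) ? m->vlan_tci : 0;
}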
@@ -729,7 +720,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
(unsigned) rx_id, (unsigned) status,
(unsigned) rte_le_to_cpu_16(rxd.length));
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u",
@@ -909,7 +900,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
(unsigned) rx_id, (unsigned) status,
(unsigned) rte_le_to_cpu_16(rxd.length));
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
@@ -1561,7 +1552,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
/* Initialize software ring entries */
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile struct e1000_rx_desc *rxd;
- struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index e0053fec..5067d208 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -86,9 +86,24 @@
#define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
+#define E1000_VTIVAR_MISC 0x01740
+#define E1000_VTIVAR_MISC_MASK 0xFF
+#define E1000_VTIVAR_VALID 0x80
+#define E1000_VTIVAR_MISC_MAILBOX 0
+#define E1000_VTIVAR_MISC_INTR_MASK 0x3
+
+/* External VLAN Enable bit mask */
+#define E1000_CTRL_EXT_EXT_VLAN (1 << 26)
+
+/* External VLAN Ether Type bit mask and shift */
+#define E1000_VET_VET_EXT 0xFFFF0000
+#define E1000_VET_VET_EXT_SHIFT 16
+
static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
+static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
+static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
@@ -99,7 +114,10 @@ static int eth_igb_link_update(struct rte_eth_dev *dev,
static void eth_igb_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
@@ -164,7 +182,10 @@ static int eth_igbvf_link_update(struct e1000_hw *hw);
static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
@@ -259,6 +280,9 @@ static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
+static void eth_igbvf_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
+static void igbvf_mbx_process(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -316,6 +340,8 @@ static const struct eth_dev_ops eth_igb_ops = {
.dev_configure = eth_igb_configure,
.dev_start = eth_igb_start,
.dev_stop = eth_igb_stop,
+ .dev_set_link_up = eth_igb_dev_set_link_up,
+ .dev_set_link_down = eth_igb_dev_set_link_down,
.dev_close = eth_igb_close,
.promiscuous_enable = eth_igb_promiscuous_enable,
.promiscuous_disable = eth_igb_promiscuous_disable,
@@ -324,6 +350,7 @@ static const struct eth_dev_ops eth_igb_ops = {
.link_update = eth_igb_link_update,
.stats_get = eth_igb_stats_get,
.xstats_get = eth_igb_xstats_get,
+ .xstats_get_names = eth_igb_xstats_get_names,
.stats_reset = eth_igb_stats_reset,
.xstats_reset = eth_igb_xstats_reset,
.dev_infos_get = eth_igb_infos_get,
@@ -385,6 +412,7 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.link_update = eth_igb_link_update,
.stats_get = eth_igbvf_stats_get,
.xstats_get = eth_igbvf_xstats_get,
+ .xstats_get_names = eth_igbvf_xstats_get_names,
.stats_reset = eth_igbvf_stats_reset,
.xstats_reset = eth_igbvf_stats_reset,
.vlan_filter_set = igbvf_vlan_filter_set,
@@ -554,6 +582,41 @@ igb_intr_disable(struct e1000_hw *hw)
E1000_WRITE_FLUSH(hw);
}
+static inline void
+igbvf_intr_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* only for mailbox */
+ E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
+ E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
+ E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/* Only the mailbox for now; extend this function if RX/TX interrupts are needed. */
+static void
+igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
+{
+ uint32_t tmp = 0;
+
+ /* mailbox */
+ tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
+ tmp |= E1000_VTIVAR_VALID;
+ E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
+}
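A worked value for the mapping above, for illustration:

/* For msix_vector == E1000_VTIVAR_MISC_MAILBOX (0):
 *   tmp = (0 & E1000_VTIVAR_MISC_INTR_MASK) | E1000_VTIVAR_VALID
 *       = 0x80
 * so VTIVAR_MISC routes the mailbox cause to vector 0 and marks it valid.
 */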
+
+static void
+eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Configure VF other cause ivar */
+ igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
+}
+
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
@@ -942,6 +1005,10 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "igb_mac_82576_vf");
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ eth_igbvf_interrupt_handler,
+ (void *)eth_dev);
+
return 0;
}
@@ -950,6 +1017,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev = eth_dev->pci_dev;
PMD_INIT_FUNC_TRACE();
@@ -966,6 +1034,12 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ eth_igbvf_interrupt_handler,
+ (void *)eth_dev);
+
return 0;
}
@@ -1143,7 +1217,7 @@ eth_igb_start(struct rte_eth_dev *dev)
rte_intr_disable(intr_handle);
/* Power up the phy. Needed to make the link go Up */
- e1000_power_up_phy(hw);
+ eth_igb_dev_set_link_up(dev);
/*
* Packet Buffer Allocation (PBA)
@@ -1349,10 +1423,7 @@ eth_igb_stop(struct rte_eth_dev *dev)
}
/* Power down the phy. Needed to make the link go Down */
- if (hw->phy.media_type == e1000_media_type_copper)
- e1000_power_down_phy(hw);
- else
- e1000_shutdown_fiber_serdes_link(hw);
+ eth_igb_dev_set_link_down(dev);
igb_dev_clear_queues(dev);
@@ -1399,6 +1470,32 @@ eth_igb_stop(struct rte_eth_dev *dev)
}
}
+static int
+eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ e1000_power_up_phy(hw);
+ else
+ e1000_power_up_fiber_serdes_link(hw);
+
+ return 0;
+}
+
+static int
+eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ e1000_power_down_phy(hw);
+ else
+ e1000_shutdown_fiber_serdes_link(hw);
+
+ return 0;
+}
+
static void
eth_igb_close(struct rte_eth_dev *dev)
{
@@ -1691,8 +1788,27 @@ eth_igb_xstats_reset(struct rte_eth_dev *dev)
memset(stats, 0, sizeof(*stats));
}
+static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned i;
+
+ if (xstats_names == NULL)
+ return IGB_NB_XSTATS;
+
+ /* Note: limit checked in rte_eth_xstats_get_names() */
+
+ for (i = 0; i < IGB_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
+ "%s", rte_igb_stats_strings[i].name);
+ }
+
+ return IGB_NB_XSTATS;
+}
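The NULL check above implements ethdev's usual two-call sizing contract; a minimal hypothetical caller, assuming the 16.07 rte_eth_xstats_get_names() prototype:

#include <rte_ethdev.h>

static int example_count_xstats(uint8_t port_id)
{
	/* Hypothetical: a NULL array asks the driver only for the number
	 * of stat names, IGB_NB_XSTATS for this PMD.
	 */
	return rte_eth_xstats_get_names(port_id, NULL, 0);
}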
+
static int
-eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1713,8 +1829,7 @@ eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* Extended stats */
for (i = 0; i < IGB_NB_XSTATS; i++) {
- snprintf(xstats[i].name, sizeof(xstats[i].name),
- "%s", rte_igb_stats_strings[i].name);
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_igb_stats_strings[i].offset);
}
@@ -1762,8 +1877,23 @@ igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
hw_stats->last_gotlbc, hw_stats->gotlbc);
}
+static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned i;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < IGBVF_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name), "%s",
+ rte_igbvf_stats_strings[i].name);
+ }
+ return IGBVF_NB_XSTATS;
+}
+
static int
-eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1780,8 +1910,7 @@ eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
return 0;
for (i = 0; i < IGBVF_NB_XSTATS; i++) {
- snprintf(xstats[i].name, sizeof(xstats[i].name), "%s",
- rte_igbvf_stats_strings[i].name);
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_igbvf_stats_strings[i].offset);
}
@@ -1805,11 +1934,6 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
rte_stats->ibytes = hw_stats->gorc;
rte_stats->opackets = hw_stats->gptc;
rte_stats->obytes = hw_stats->gotc;
- rte_stats->imcasts = hw_stats->mprc;
- rte_stats->ilbpackets = hw_stats->gprlbc;
- rte_stats->ilbbytes = hw_stats->gorlbc;
- rte_stats->olbpackets = hw_stats->gptlbc;
- rte_stats->olbbytes = hw_stats->gotlbc;
}
static void
@@ -2242,21 +2366,25 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t reg = ETHER_TYPE_VLAN;
- int ret = 0;
+ uint32_t reg, qinq;
- switch (vlan_type) {
- case ETH_VLAN_TYPE_INNER:
- reg |= (tpid << 16);
+ qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ qinq &= E1000_CTRL_EXT_EXT_VLAN;
+
+ /* Only the outer TPID of a double VLAN can be configured */
+ if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+ reg = E1000_READ_REG(hw, E1000_VET);
+ reg = (reg & (~E1000_VET_VET_EXT)) |
+ ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
E1000_WRITE_REG(hw, E1000_VET, reg);
- break;
- default:
- ret = -EINVAL;
- PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
- break;
+
+ return 0;
}
- return ret;
+ /* All other TPID values are read-only */
+ PMD_DRV_LOG(ERR, "Not supported");
+
+ return -ENOTSUP;
}
static void
@@ -2569,6 +2697,69 @@ eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
}
static int
+eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ igbvf_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = E1000_READ_REG(hw, E1000_EICR);
+ intr->flags = 0;
+
+ if (eicr == E1000_VTIVAR_MISC_MAILBOX)
+ intr->flags |= E1000_FLAG_MAILBOX;
+
+ return 0;
+}
+
+void igbvf_mbx_process(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ u32 in_msg = 0;
+
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ return;
+
+ /* PF reset VF event */
+ if (in_msg == E1000_PF_CONTROL_MSG)
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+}
+
+static int
+eth_igbvf_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (intr->flags & E1000_FLAG_MAILBOX) {
+ igbvf_mbx_process(dev);
+ intr->flags &= ~E1000_FLAG_MAILBOX;
+ }
+
+ igbvf_intr_enable(dev);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static void
+eth_igbvf_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ eth_igbvf_interrupt_get_status(dev);
+ eth_igbvf_interrupt_action(dev);
+}
+
+static int
eth_igb_led_on(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
@@ -2839,6 +3030,8 @@ igbvf_dev_start(struct rte_eth_dev *dev)
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
int ret;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
@@ -2858,12 +3051,41 @@ igbvf_dev_start(struct rte_eth_dev *dev)
return ret;
}
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ intr_vector = dev->data->nb_rx_queues;
+ ret = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (ret)
+ return ret;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ eth_igbvf_configure_msix_intr(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ /* resume enabled intr since hw reset */
+ igbvf_intr_enable(dev);
+
return 0;
}
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
PMD_INIT_FUNC_TRACE();
igbvf_stop_adapter(dev);
@@ -2875,6 +3097,16 @@ igbvf_dev_stop(struct rte_eth_dev *dev)
igbvf_set_vfta_all(dev,0);
igb_dev_clear_queues(dev);
+
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
static void
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 4a987e3c..9d80a0b3 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -79,16 +79,6 @@
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
@@ -739,7 +729,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
uint64_t pkt_flags;
/* Check if VLAN present */
- pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+ pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
#if defined(RTE_LIBRTE_IEEE1588)
if (rx_status & E1000_RXD_STAT_TMST)
@@ -838,7 +829,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
(unsigned) rx_id, (unsigned) staterr,
(unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
@@ -1021,7 +1012,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
(unsigned) rx_id, (unsigned) staterr,
(unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
@@ -1957,7 +1948,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
/* Initialize software ring entries. */
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile union e1000_adv_rx_desc *rxd;
- struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
index ac2b55dc..a0d3358d 100644
--- a/drivers/net/ena/Makefile
+++ b/drivers/net/ena/Makefile
@@ -54,7 +54,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net
CFLAGS += $(INCLUDES)
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index aab2ac86..5f693301 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -93,14 +93,18 @@ typedef uint64_t dma_addr_t;
#define ENA_GET_SYSTEM_USECS() \
(rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
#define ENA_ASSERT(cond, format, arg...) \
do { \
if (unlikely(!(cond))) { \
- printf("Assertion failed on %s:%s:%d: " format, \
- __FILE__, __func__, __LINE__, ##arg); \
- rte_exit(EXIT_FAILURE, "ASSERTION FAILED\n"); \
+ RTE_LOG(ERR, PMD, format, ##arg); \
+ rte_panic("line %d\tassert \"" #cond "\" " \
+ "failed\n", __LINE__); \
} \
} while (0)
+#else
+#define ENA_ASSERT(cond, format, arg...) do {} while (0)
+#endif
#define ENA_MAX32(x, y) RTE_MAX((x), (y))
#define ENA_MAX16(x, y) RTE_MAX((x), (y))
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 02af67a2..e157587b 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -605,7 +605,6 @@ static void ena_stats_restart(struct rte_eth_dev *dev)
rte_atomic64_init(&adapter->drv_stats->ierrors);
rte_atomic64_init(&adapter->drv_stats->oerrors);
- rte_atomic64_init(&adapter->drv_stats->imcasts);
rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
}
@@ -643,7 +642,6 @@ static void ena_stats_get(struct rte_eth_dev *dev,
/* Driver related stats */
stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
- stats->imcasts = rte_atomic64_read(&adapter->drv_stats->imcasts);
stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
}
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index ba6f01e6..aca853c1 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -121,7 +121,6 @@ enum ena_adapter_state {
struct ena_driver_stats {
rte_atomic64_t ierrors;
rte_atomic64_t oerrors;
- rte_atomic64_t imcasts;
rte_atomic64_t rx_nombuf;
};
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index f3162741..3926b795 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src
#
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
-SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h
deleted file mode 100644
index b0191093..00000000
--- a/drivers/net/enic/base/enic_vnic_wq.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved.
- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2015, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef _ENIC_VNIC_WQ_H_
-#define _ENIC_VNIC_WQ_H_
-
-#include "vnic_dev.h"
-#include "vnic_cq.h"
-
-static inline void enic_vnic_post_wq_index(struct vnic_wq *wq)
-{
- struct vnic_wq_buf *buf = wq->to_use;
-
- /* Adding write memory barrier prevents compiler and/or CPU
- * reordering, thus avoiding descriptor posting before
- * descriptor is initialized. Otherwise, hardware can read
- * stale descriptor fields.
- */
- wmb();
- iowrite32(buf->index, &wq->ctrl->posted_index);
-}
-
-static inline void enic_vnic_post_wq(struct vnic_wq *wq,
- void *os_buf, dma_addr_t dma_addr,
- unsigned int len, int sop,
- uint8_t desc_skip_cnt, uint8_t cq_entry,
- uint8_t compressed_send, uint64_t wrid)
-{
- struct vnic_wq_buf *buf = wq->to_use;
-
- buf->sop = sop;
- buf->cq_entry = cq_entry;
- buf->compressed_send = compressed_send;
- buf->desc_skip_cnt = desc_skip_cnt;
- buf->os_buf = os_buf;
- buf->dma_addr = dma_addr;
- buf->len = len;
- buf->wr_id = wrid;
-
- buf = buf->next;
- wq->ring.desc_avail -= desc_skip_cnt;
- wq->to_use = buf;
-
- if (cq_entry)
- enic_vnic_post_wq_index(wq);
-}
-
-#endif /* _ENIC_VNIC_WQ_H_ */
diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h
index 7292d9dc..13e24b43 100644
--- a/drivers/net/enic/base/rq_enet_desc.h
+++ b/drivers/net/enic/base/rq_enet_desc.h
@@ -55,7 +55,7 @@ enum rq_enet_type_types {
#define RQ_ENET_TYPE_BITS 2
#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
-static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
u64 address, u8 type, u16 length)
{
desc->address = cpu_to_le64(address);
diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h
index 922391b3..13ab87ca 100644
--- a/drivers/net/enic/base/vnic_cq.h
+++ b/drivers/net/enic/base/vnic_cq.h
@@ -90,50 +90,6 @@ struct vnic_cq {
#endif
};
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
-{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
- struct rte_mbuf **rx_pkts = opaque;
- unsigned int ret;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
- if (opaque)
- opaque = (void *)&(rx_pkts[work_done]);
-
- ret = (*q_service)(cq->vdev, cq_desc, type,
- q_number, completed_index, opaque);
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- if (ret)
- work_done++;
- if (work_done >= work_to_do)
- break;
- }
-
- return work_done;
-}
-
void vnic_cq_free(struct vnic_cq *cq);
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int socket_id,
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index e8a50287..fc2e4cc3 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -83,7 +83,7 @@ struct vnic_dev {
struct vnic_intr_coal_timer_info intr_coal_timer_info;
void *(*alloc_consistent)(void *priv, size_t size,
dma_addr_t *dma_handle, u8 *name);
- void (*free_consistent)(struct rte_pci_device *hwdev,
+ void (*free_consistent)(void *priv,
size_t size, void *vaddr,
dma_addr_t dma_handle);
};
@@ -101,7 +101,7 @@ void *vnic_dev_priv(struct vnic_dev *vdev)
void vnic_register_cbacks(struct vnic_dev *vdev,
void *(*alloc_consistent)(void *priv, size_t size,
dma_addr_t *dma_handle, u8 *name),
- void (*free_consistent)(struct rte_pci_device *hwdev,
+ void (*free_consistent)(void *priv,
size_t size, void *vaddr,
dma_addr_t dma_handle))
{
@@ -807,7 +807,7 @@ int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
if (vdev->notify && !vnic_dev_in_reset(vdev)) {
- vdev->free_consistent(vdev->pdev,
+ vdev->free_consistent(vdev->priv,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
@@ -924,16 +924,16 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- vdev->free_consistent(vdev->pdev,
+ vdev->free_consistent(vdev->priv,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->stats)
- vdev->free_consistent(vdev->pdev,
+ vdev->free_consistent(vdev->priv,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- vdev->free_consistent(vdev->pdev,
+ vdev->free_consistent(vdev->priv,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
kfree(vdev);
@@ -1041,7 +1041,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
*entry = (u16)a0;
- vdev->free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
+ vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
a0 = *entry;
ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 113d6acc..689442f3 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -102,7 +102,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
void vnic_register_cbacks(struct vnic_dev *vdev,
void *(*alloc_consistent)(void *priv, size_t size,
dma_addr_t *dma_handle, u8 *name),
- void (*free_consistent)(struct rte_pci_device *hwdev,
+ void (*free_consistent)(void *priv,
size_t size, void *vaddr,
dma_addr_t dma_handle));
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h
index cc34998f..50622479 100644
--- a/drivers/net/enic/base/vnic_enet.h
+++ b/drivers/net/enic/base/vnic_enet.h
@@ -35,6 +35,10 @@
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
+/* Hardware intr coalesce timer is in units of 1.5us */
+#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2 / 3)
+#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3 / 2)
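A quick round trip through the conversion macros above, with illustrative values; note the integer truncation:

/* INTR_COALESCE_USEC_TO_HW(30)  == 20   30 us -> 20 hw units
 * INTR_COALESCE_HW_TO_USEC(20)  == 30   20 hw units -> 30 us
 * INTR_COALESCE_USEC_TO_HW(100) == 66   truncated; 66 hw units == 99 us
 */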
+
/* Device-specific region: enet configuration */
struct vnic_enet_config {
u32 flags;
@@ -50,6 +54,12 @@ struct vnic_enet_config {
u16 vf_rq_count;
u16 num_arfs;
u64 mem_paddr;
+ u16 rdma_qp_id;
+ u16 rdma_qp_count;
+ u16 rdma_resgrp;
+ u32 rdma_mr_id;
+ u32 rdma_mr_count;
+ u32 max_pkt_size;
};
#define VENETF_TSO 0x1 /* TSO enabled */
@@ -64,9 +74,14 @@ struct vnic_enet_config {
#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
#define VENETF_LOOP 0x800 /* Loopback enabled */
-#define VENETF_VMQ 0x4000 /* using VMQ flag for VMware NETQ */
+#define VENETF_FAILOVER 0x1000 /* Fabric failover enabled */
+#define VENETF_USPACE_NIC 0x2000 /* vHPC enabled */
+#define VENETF_VMQ 0x4000 /* VMQ enabled */
+#define VENETF_ARFS 0x8000 /* ARFS enabled */
#define VENETF_VXLAN 0x10000 /* VxLAN offload */
#define VENETF_NVGRE 0x20000 /* NVGRE offload */
+#define VENETF_GRPINTR 0x40000 /* group interrupt */
+
#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
index cb62c5e5..0e700a12 100644
--- a/drivers/net/enic/base/vnic_rq.c
+++ b/drivers/net/enic/base/vnic_rq.c
@@ -84,11 +84,12 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
iowrite32(cq_index, &rq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
- iowrite32(0, &rq->ctrl->dropped_packet_count);
iowrite32(0, &rq->ctrl->error_status);
iowrite32(fetch_index, &rq->ctrl->fetch_index);
iowrite32(posted_index, &rq->ctrl->posted_index);
-
+ if (rq->is_sop)
+ iowrite32(((rq->is_sop << 10) | rq->data_queue_idx),
+ &rq->ctrl->data_ring);
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -96,6 +97,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_offset)
{
u32 fetch_index = 0;
+
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
@@ -110,6 +112,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
error_interrupt_offset);
rq->rxst_idx = 0;
rq->tot_pkts = 0;
+ rq->pkt_first_seg = NULL;
+ rq->pkt_last_seg = NULL;
}
void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index e083ccc2..fd9e1704 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -60,10 +60,18 @@ struct vnic_rq_ctrl {
u32 pad7;
u32 error_status; /* 0x48 */
u32 pad8;
- u32 dropped_packet_count; /* 0x50 */
+ u32 tcp_sn; /* 0x50 */
u32 pad9;
- u32 dropped_packet_count_rc; /* 0x58 */
+ u32 unused; /* 0x58 */
u32 pad10;
+ u32 dca_select; /* 0x60 */
+ u32 pad11;
+ u32 dca_value; /* 0x68 */
+ u32 pad12;
+ u32 data_ring; /* 0x70 */
+ u32 pad13;
+ u32 header_split; /* 0x78 */
+ u32 pad14;
};
struct vnic_rq {
@@ -82,6 +90,12 @@ struct vnic_rq {
struct rte_mempool *mp;
uint16_t rxst_idx;
uint32_t tot_pkts;
+ uint16_t data_queue_idx;
+ uint8_t is_sop;
+ uint8_t in_use;
+ struct rte_mbuf *pkt_first_seg;
+ struct rte_mbuf *pkt_last_seg;
+ unsigned int max_mbufs_per_pkt;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
index a3ef4170..9b9ff4d7 100644
--- a/drivers/net/enic/base/vnic_wq.c
+++ b/drivers/net/enic/base/vnic_wq.c
@@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
- struct vnic_wq_buf *buf;
- unsigned int i, j, count = wq->ring.desc_count;
- unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
-
- for (i = 0; i < blks; i++) {
- wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
- if (!wq->bufs[i])
- return -ENOMEM;
- }
-
- for (i = 0; i < blks; i++) {
- buf = wq->bufs[i];
- for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
- buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
- buf->desc = (u8 *)wq->ring.descs +
- wq->ring.desc_size * buf->index;
- if (buf->index + 1 == count) {
- buf->next = wq->bufs[0];
- break;
- } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
- buf->next = wq->bufs[i + 1];
- } else {
- buf->next = buf + 1;
- buf++;
- }
- }
- }
-
- wq->to_use = wq->to_clean = wq->bufs[0];
-
+ unsigned int count = wq->ring.desc_count;
+ /* Allocate the mbuf ring */
+ wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
+ sizeof(struct vnic_wq_buf) * count,
+ RTE_CACHE_LINE_SIZE, wq->socket_id);
+ wq->head_idx = 0;
+ wq->tail_idx = 0;
+ if (wq->bufs == NULL)
+ return -ENOMEM;
return 0;
}
void vnic_wq_free(struct vnic_wq *wq)
{
struct vnic_dev *vdev;
- unsigned int i;
vdev = wq->vdev;
vnic_dev_free_desc_ring(vdev, &wq->ring);
- for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
- if (wq->bufs[i]) {
- kfree(wq->bufs[i]);
- wq->bufs[i] = NULL;
- }
- }
-
+ rte_free(wq->bufs);
wq->ctrl = NULL;
}
-int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
- unsigned int desc_size)
-{
- int mem_size = 0;
-
- mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size);
-
- mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) *
- VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count);
-
- return mem_size;
-}
-
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
@@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
iowrite32(0, &wq->ctrl->error_status);
- wq->to_use = wq->to_clean =
- &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
- [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
+ wq->head_idx = fetch_index;
+ wq->tail_idx = wq->head_idx;
}
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
@@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
vnic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
+ wq->last_completed_index = 0;
}
void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error)
@@ -220,21 +179,24 @@ int vnic_wq_disable(struct vnic_wq *wq)
}
void vnic_wq_clean(struct vnic_wq *wq,
- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
+ void (*buf_clean)(struct vnic_wq_buf *buf))
{
struct vnic_wq_buf *buf;
+ unsigned int to_clean = wq->tail_idx;
- buf = wq->to_clean;
+ buf = &wq->bufs[to_clean];
while (vnic_wq_desc_used(wq) > 0) {
- (*buf_clean)(wq, buf);
+ (*buf_clean)(buf);
+ to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
- buf = wq->to_clean = buf->next;
+ buf = &wq->bufs[to_clean];
wq->ring.desc_avail++;
}
- wq->to_use = wq->to_clean = wq->bufs[0];
+ wq->head_idx = 0;
+ wq->tail_idx = 0;
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index c23de625..38a217f1 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -38,6 +38,7 @@
#include "vnic_dev.h"
#include "vnic_cq.h"
+#include <rte_memzone.h>
/* Work queue control */
struct vnic_wq_ctrl {
@@ -64,42 +65,23 @@ struct vnic_wq_ctrl {
u32 pad9;
};
+/* 16 bytes */
struct vnic_wq_buf {
- struct vnic_wq_buf *next;
- dma_addr_t dma_addr;
- void *os_buf;
- unsigned int len;
- unsigned int index;
- int sop;
- void *desc;
- uint64_t wr_id; /* Cookie */
- uint8_t cq_entry; /* Gets completion event from hw */
- uint8_t desc_skip_cnt; /* Num descs to occupy */
- uint8_t compressed_send; /* Both hdr and payload in one desc */
+ struct rte_mempool *pool;
+ void *mb;
};
-/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
-#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32
-#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64
-#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
- ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
- VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES))
-#define VNIC_WQ_BUF_BLK_SZ(entries) \
- (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
-#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
- DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
-
struct vnic_wq {
unsigned int index;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
- struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
- struct vnic_wq_buf *to_use;
- struct vnic_wq_buf *to_clean;
- unsigned int pkts_outstanding;
+ struct vnic_wq_buf *bufs;
+ unsigned int head_idx;
+ unsigned int tail_idx;
unsigned int socket_id;
+ const struct rte_memzone *cqmsg_rz;
+ uint16_t last_completed_index;
};
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
@@ -114,11 +96,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
-static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
-{
- return wq->to_use->desc;
-}
-
#define PI_LOG2_CACHE_LINE_SIZE 5
#define PI_INDEX_BITS 12
#define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1)
@@ -191,73 +168,13 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len,
PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF);
}
-static inline void vnic_wq_post(struct vnic_wq *wq,
- void *os_buf, dma_addr_t dma_addr,
- unsigned int len, int sop, int eop,
- uint8_t desc_skip_cnt, uint8_t cq_entry,
- uint8_t compressed_send, uint64_t wrid)
-{
- struct vnic_wq_buf *buf = wq->to_use;
-
- buf->sop = sop;
- buf->cq_entry = cq_entry;
- buf->compressed_send = compressed_send;
- buf->desc_skip_cnt = desc_skip_cnt;
- buf->os_buf = os_buf;
- buf->dma_addr = dma_addr;
- buf->len = len;
- buf->wr_id = wrid;
-
- buf = buf->next;
- if (eop) {
-#ifdef DO_PREFETCH
- uint64_t wr = vnic_cached_posted_index(dma_addr, len,
- buf->index);
-#endif
- /* Adding write memory barrier prevents compiler and/or CPU
- * reordering, thus avoiding descriptor posting before
- * descriptor is initialized. Otherwise, hardware can read
- * stale descriptor fields.
- */
- wmb();
-#ifdef DO_PREFETCH
- /* Intel chipsets seem to limit the rate of PIOs that we can
- * push on the bus. Thus, it is very important to do a single
- * 64 bit write here. With two 32-bit writes, my maximum
- * pkt/sec rate was cut almost in half. -AJF
- */
- iowrite64((uint64_t)wr, &wq->ctrl->posted_index);
-#else
- iowrite32(buf->index, &wq->ctrl->posted_index);
-#endif
- }
- wq->to_use = buf;
-
- wq->ring.desc_avail -= desc_skip_cnt;
-}
-
-static inline void vnic_wq_service(struct vnic_wq *wq,
- struct cq_desc *cq_desc, u16 completed_index,
- void (*buf_service)(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
- void *opaque)
+static inline uint32_t
+buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
{
- struct vnic_wq_buf *buf;
-
- buf = wq->to_clean;
- while (1) {
-
- (*buf_service)(wq, cq_desc, buf, opaque);
-
- wq->ring.desc_avail++;
-
- wq->to_clean = buf->next;
-
- if (buf->index == completed_index)
- break;
-
- buf = wq->to_clean;
- }
+ idx++;
+ if (unlikely(idx == n_descriptors))
+ idx = 0;
+ return idx;
}
void vnic_wq_free(struct vnic_wq *wq);
@@ -275,8 +192,5 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
- void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
-int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count,
- unsigned int desc_size);
-
+ void (*buf_clean)(struct vnic_wq_buf *buf));
#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 8c914f5b..53fed0b8 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -46,6 +46,8 @@
#include "vnic_rss.h"
#include "enic_res.h"
#include "cq_enet_desc.h"
+#include <sys/queue.h>
+#include <rte_spinlock.h>
#define DRV_NAME "enic_pmd"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
@@ -53,8 +55,11 @@
#define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc"
#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
-#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
+/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both
+ * RQs use the same CQ.
+ */
+#define ENIC_RQ_MAX 16
+#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
#define VLAN_ETH_HLEN 18
@@ -62,7 +67,6 @@
#define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
#define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */
-#define PKT_TX_TCP_UDP_CKSUM 0x6000
#define ENIC_CALC_IP_CKSUM 1
#define ENIC_CALC_TCP_UDP_CKSUM 2
#define ENIC_MAX_MTU 9000
@@ -91,6 +95,16 @@ struct enic_fdir {
struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
};
+struct enic_soft_stats {
+ rte_atomic64_t rx_nombuf;
+ rte_atomic64_t rx_packet_errors;
+};
+
+struct enic_memzone_entry {
+ const struct rte_memzone *rz;
+ LIST_ENTRY(enic_memzone_entry) entries;
+};
+
/* Per-instance private data structure */
struct enic {
struct enic *next;
@@ -114,6 +128,7 @@ struct enic {
u8 ig_vlan_strip_en;
int link_status;
u8 hw_ip_checksum;
+ u16 max_mtu;
unsigned int flags;
unsigned int priv_flags;
@@ -133,11 +148,38 @@ struct enic {
/* interrupt resource */
struct vnic_intr intr;
unsigned int intr_count;
+
+ /* software counters */
+ struct enic_soft_stats soft_stats;
+
+ /* linked list storing memory allocations */
+ LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
+ rte_spinlock_t memzone_list_lock;
+
};
+static inline unsigned int enic_sop_rq(unsigned int rq)
+{
+ return rq * 2;
+}
+
+static inline unsigned int enic_data_rq(unsigned int rq)
+{
+ return rq * 2 + 1;
+}
+
+static inline unsigned int enic_vnic_rq_count(struct enic *enic)
+{
+ return enic->rq_count * 2;
+}
+
static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
{
- return rq;
+ /* Scatter rx uses two receive queues together with one
+ * completion queue, so the completion queue number is no
+ * longer the same as the rq number.
+ */
+ return rq / 2;
}
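Worked indices for the helpers above (application queue 3 chosen for illustration):

/* enic_sop_rq(3)  == 6    SOP (start-of-packet) RQ on the VIC
 * enic_data_rq(3) == 7    companion data RQ for Rx scatter
 * enic_cq_rq(enic, 6) == enic_cq_rq(enic, 7) == 3   shared CQ
 */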
static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
@@ -155,14 +197,40 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
return (struct enic *)eth_dev->data->dev_private;
}
-#define RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
-#define ASSERT(x) do { \
- if (!(x)) \
- rte_panic("ENIC: x"); \
-} while (0)
+static inline uint32_t
+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+ uint32_t d = i0 + i1;
+ d -= (d >= n_descriptors) ? n_descriptors : 0;
+ return d;
+}
+
+static inline uint32_t
+enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+ int32_t d = i1 - i0;
+ return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
+}
+
+static inline uint32_t
+enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
+{
+ idx++;
+ if (unlikely(idx == n_descriptors))
+ idx = 0;
+ return idx;
+}
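The helpers above wrap modulo the descriptor count without a divide; a few worked values for an 8-entry ring, for illustration:

/* enic_ring_add(8, 6, 3) == 1    6 + 3 wraps past the end
 * enic_ring_sub(8, 6, 2) == 4    distance from index 6 forward to 2
 * enic_ring_incr(8, 7)   == 0    increment wraps to the start
 */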
+
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define ENIC_ASSERT(cond) \
+ do { \
+ if (unlikely(!(cond))) { \
+ rte_panic("line %d\tassert \"" #cond "\" " \
+ "failed\n", __LINE__); \
+ } \
+ } while (0)
#else
-#define ASSERT(x)
+#define ENIC_ASSERT(cond) do {} while (0)
#endif
extern void enic_fdir_stats_get(struct enic *enic,
@@ -209,5 +277,7 @@ extern int enic_clsf_init(struct enic *enic);
extern void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-
+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index edb56e1d..3365176a 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -148,9 +148,13 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
enic->fdir.nodes[pos] = NULL;
if (unlikely(key->rq_index == queue)) {
/* Nothing to be done */
+ enic->fdir.stats.f_add++;
pos = rte_hash_add_key(enic->fdir.hash, params);
+ if (pos < 0) {
+ dev_err(enic, "Add hash key failed\n");
+ return pos;
+ }
enic->fdir.nodes[pos] = key;
- enic->fdir.stats.f_add++;
dev_warning(enic,
"FDIR rule is already present\n");
return 0;
@@ -213,6 +217,11 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
}
pos = rte_hash_add_key(enic->fdir.hash, params);
+ if (pos < 0) {
+ dev_err(enic, "Add hash key failed\n");
+ return pos;
+ }
+
enic->fdir.nodes[pos] = key;
return 0;
}
@@ -239,15 +248,16 @@ void enic_clsf_destroy(struct enic *enic)
int enic_clsf_init(struct enic *enic)
{
+ char clsf_name[RTE_HASH_NAMESIZE];
struct rte_hash_parameters hash_params = {
- .name = "enicpmd_clsf_hash",
+ .name = clsf_name,
.entries = ENICPMD_CLSF_HASH_ENTRIES,
.key_len = sizeof(struct rte_eth_fdir_filter),
.hash_func = DEFAULT_HASH_FUNC,
.hash_func_init_val = 0,
.socket_id = SOCKET_ID_ANY,
};
-
+ snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
enic->fdir.hash = rte_hash_create(&hash_params);
memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
enic->fdir.stats.free = ENICPMD_FDIR_MAX;
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 6bea9405..a7ce064f 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -269,14 +269,18 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- if (queue_idx >= ENIC_RQ_MAX) {
+ /* With Rx scatter support, two RQs are now used on VIC per RQ used
+ * by the application.
+ */
+ if (queue_idx * 2 >= ENIC_RQ_MAX) {
dev_err(enic,
- "Max number of RX queues exceeded. Max is %d\n",
+ "Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
ENIC_RQ_MAX);
return -EINVAL;
}
- eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];
+ eth_dev->data->rx_queues[queue_idx] =
+ (void *)&enic->rq[enic_sop_rq(queue_idx)];
ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
if (ret) {
@@ -435,7 +439,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->max_rx_queues = enic->rq_count;
device_info->max_tx_queues = enic->wq_count;
device_info->min_rx_bufsize = ENIC_MIN_MTU;
- device_info->max_rx_pktlen = enic->config.mtu;
+ device_info->max_rx_pktlen = enic->rte_dev->data->mtu
+ + ETHER_HDR_LEN + 4;
device_info->max_mac_addrs = 1;
device_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
@@ -455,8 +460,12 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
- RTE_PTYPE_L3_IPV4,
- RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
RTE_PTYPE_UNKNOWN
};
@@ -519,69 +528,12 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui
enic_del_mac_address(enic);
}
-
-static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
- uint16_t index;
- unsigned int frags;
- unsigned int pkt_len;
- unsigned int seg_len;
- unsigned int inc_len;
- unsigned int nb_segs;
- struct rte_mbuf *tx_pkt, *next_tx_pkt;
- struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
- struct enic *enic = vnic_dev_priv(wq->vdev);
- unsigned short vlan_id;
- unsigned short ol_flags;
- uint8_t last_seg, eop;
- unsigned int host_tx_descs = 0;
-
- for (index = 0; index < nb_pkts; index++) {
- tx_pkt = *tx_pkts++;
- inc_len = 0;
- nb_segs = tx_pkt->nb_segs;
- if (nb_segs > vnic_wq_desc_avail(wq)) {
- if (index > 0)
- enic_post_wq_index(wq);
-
- /* wq cleanup and try again */
- if (!enic_cleanup_wq(enic, wq) ||
- (nb_segs > vnic_wq_desc_avail(wq))) {
- return index;
- }
- }
-
- pkt_len = tx_pkt->pkt_len;
- vlan_id = tx_pkt->vlan_tci;
- ol_flags = tx_pkt->ol_flags;
- for (frags = 0; inc_len < pkt_len; frags++) {
- if (!tx_pkt)
- break;
- next_tx_pkt = tx_pkt->next;
- seg_len = tx_pkt->data_len;
- inc_len += seg_len;
-
- host_tx_descs++;
- last_seg = 0;
- eop = 0;
- if ((pkt_len == inc_len) || !next_tx_pkt) {
- eop = 1;
- /* post if last packet in batch or > thresh */
- if ((index == (nb_pkts - 1)) ||
- (host_tx_descs > ENIC_TX_POST_THRESH)) {
- last_seg = 1;
- host_tx_descs = 0;
- }
- }
- enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
- !frags, eop, last_seg, ol_flags, vlan_id);
- tx_pkt = next_tx_pkt;
- }
- }
+ struct enic *enic = pmd_priv(eth_dev);
- enic_cleanup_wq(enic, wq);
- return index;
+ ENICPMD_FUNC_TRACE();
+ return enic_set_mtu(enic, mtu);
}
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
@@ -601,7 +553,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.queue_stats_mapping_set = NULL,
.dev_infos_get = enicpmd_dev_info_get,
.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
- .mtu_set = NULL,
+ .mtu_set = enicpmd_mtu_set,
.vlan_filter_set = enicpmd_vlan_filter_set,
.vlan_tpid_set = NULL,
.vlan_offload_set = enicpmd_vlan_offload_set,
@@ -642,7 +594,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
enic->rte_dev = eth_dev;
eth_dev->dev_ops = &enicpmd_eth_dev_ops;
eth_dev->rx_pkt_burst = &enic_recv_pkts;
- eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
pdev = eth_dev->pci_dev;
rte_eth_copy_pci_info(eth_dev, pdev);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index e3da51db..dc831b48 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -58,18 +58,6 @@
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"
-#include "enic_vnic_wq.h"
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
static inline int enic_is_sriov_vf(struct enic *enic)
{
@@ -92,7 +80,7 @@ static int is_eth_addr_valid(uint8_t *addr)
}
static void
-enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
+enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
uint16_t i;
@@ -101,7 +89,7 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
return;
}
- for (i = 0; i < enic->config.rq_desc_count; i++) {
+ for (i = 0; i < rq->ring.desc_count; i++) {
if (rq->mbuf_ring[i]) {
rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
rq->mbuf_ring[i] = NULL;
@@ -109,38 +97,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
}
}
-
void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
{
vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
}
-static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf)
+static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf;
+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
rte_mempool_put(mbuf->pool, mbuf);
- buf->os_buf = NULL;
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
- __rte_unused struct cq_desc *cq_desc,
- struct vnic_wq_buf *buf,
- __rte_unused void *opaque)
-{
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_wq_service(&enic->wq[q_number], cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- return 0;
+ buf->mb = NULL;
}
static void enic_log_q_error(struct enic *enic)
@@ -155,7 +122,7 @@ static void enic_log_q_error(struct enic *enic)
error_status);
}
- for (i = 0; i < enic->rq_count; i++) {
+ for (i = 0; i < enic_vnic_rq_count(enic); i++) {
error_status = vnic_rq_error_status(&enic->rq[i]);
if (error_status)
dev_err(enic, "RQ[%d] error_status %d\n", i,
@@ -163,93 +130,62 @@ static void enic_log_q_error(struct enic *enic)
}
}
-unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq)
+static void enic_clear_soft_stats(struct enic *enic)
{
- unsigned int cq = enic_cq_wq(enic, wq->index);
-
- /* Return the work done */
- return vnic_cq_service(&enic->cq[cq],
- -1 /*wq_work_to_do*/, enic_wq_service, NULL);
+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
+ rte_atomic64_clear(&soft_stats->rx_nombuf);
+ rte_atomic64_clear(&soft_stats->rx_packet_errors);
}
-void enic_post_wq_index(struct vnic_wq *wq)
+static void enic_init_soft_stats(struct enic *enic)
{
- enic_vnic_post_wq_index(wq);
-}
-
-void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- struct rte_mbuf *tx_pkt, unsigned short len,
- uint8_t sop, uint8_t eop, uint8_t cq_entry,
- uint16_t ol_flags, uint16_t vlan_tag)
-{
- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
- uint16_t mss = 0;
- uint8_t vlan_tag_insert = 0;
- uint64_t bus_addr = (dma_addr_t)
- (tx_pkt->buf_physaddr + tx_pkt->data_off);
-
- if (sop) {
- if (ol_flags & PKT_TX_VLAN_PKT)
- vlan_tag_insert = 1;
-
- if (enic->hw_ip_checksum) {
- if (ol_flags & PKT_TX_IP_CKSUM)
- mss |= ENIC_CALC_IP_CKSUM;
-
- if (ol_flags & PKT_TX_TCP_UDP_CKSUM)
- mss |= ENIC_CALC_TCP_UDP_CKSUM;
- }
- }
-
- wq_enet_desc_enc(desc,
- bus_addr,
- len,
- mss,
- 0 /* header_length */,
- 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */,
- eop,
- cq_entry,
- 0 /* fcoe_encap */,
- vlan_tag_insert,
- vlan_tag,
- 0 /* loopback */);
-
- enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len,
- sop,
- 1 /*desc_skip_cnt*/,
- cq_entry,
- 0 /*compressed send*/,
- 0 /*wrid*/);
+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
+ rte_atomic64_init(&soft_stats->rx_nombuf);
+ rte_atomic64_init(&soft_stats->rx_packet_errors);
+ enic_clear_soft_stats(enic);
}
void enic_dev_stats_clear(struct enic *enic)
{
if (vnic_dev_stats_clear(enic->vdev))
dev_err(enic, "Error in clearing stats\n");
+ enic_clear_soft_stats(enic);
}
void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
struct vnic_stats *stats;
+ struct enic_soft_stats *soft_stats = &enic->soft_stats;
+ int64_t rx_truncated;
+ uint64_t rx_packet_errors;
if (vnic_dev_stats_dump(enic->vdev, &stats)) {
dev_err(enic, "Error in getting stats\n");
return;
}
- r_stats->ipackets = stats->rx.rx_frames_ok;
+ /* The number of truncated packets can only be calculated by
+ * subtracting a hardware counter from error packets received by
+ * the driver. Note: this causes transient inaccuracies in the
+	 * ipackets count. Also, the lengths of truncated packets are
+	 * counted in ibytes even though truncated packets are dropped,
+	 * which can make ibytes slightly higher than it should be.
+ */
+ rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
+ rx_truncated = rx_packet_errors - stats->rx.rx_errors;
+
+ r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
r_stats->opackets = stats->tx.tx_frames_ok;
r_stats->ibytes = stats->rx.rx_bytes_ok;
r_stats->obytes = stats->tx.tx_bytes_ok;
- r_stats->ierrors = stats->rx.rx_errors;
+ r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
r_stats->oerrors = stats->tx.tx_errors;
- r_stats->imissed = stats->rx.rx_drop;
+ r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
- r_stats->imcasts = stats->rx.rx_multicast_frames_ok;
- r_stats->rx_nombuf = stats->rx.rx_no_bufs;
+ r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
}
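
The truncation arithmetic above can be followed with hypothetical counter
values (not taken from the patch): if the PMD flagged 10 error packets
but the NIC's own rx_errors counter reached only 7, the remaining 3 must
be truncated frames that were dropped, so they are subtracted from
ipackets and added to imissed.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t soft_rx_packet_errors = 10; /* PMD soft stat */
        uint64_t hw_rx_errors = 7;           /* stats->rx.rx_errors */
        uint64_t hw_rx_frames_ok = 1000;     /* stats->rx.rx_frames_ok */
        int64_t rx_truncated = soft_rx_packet_errors - hw_rx_errors;

        /* prints "ipackets = 997" */
        printf("ipackets = %llu\n",
               (unsigned long long)(hw_rx_frames_ok - rx_truncated));
        return 0;
    }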
void enic_del_mac_address(struct enic *enic)
@@ -298,12 +234,35 @@ void enic_init_vnic_resources(struct enic *enic)
unsigned int error_interrupt_enable = 1;
unsigned int error_interrupt_offset = 0;
unsigned int index = 0;
+ unsigned int cq_idx;
+ struct vnic_rq *data_rq;
for (index = 0; index < enic->rq_count; index++) {
- vnic_rq_init(&enic->rq[index],
- enic_cq_rq(enic, index),
+ cq_idx = enic_cq_rq(enic, enic_sop_rq(index));
+
+ vnic_rq_init(&enic->rq[enic_sop_rq(index)],
+ cq_idx,
error_interrupt_enable,
error_interrupt_offset);
+
+ data_rq = &enic->rq[enic_data_rq(index)];
+ if (data_rq->in_use)
+ vnic_rq_init(data_rq,
+ cq_idx,
+ error_interrupt_enable,
+ error_interrupt_offset);
+
+ vnic_cq_init(&enic->cq[cq_idx],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 0 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ 0 /* interrupt offset */,
+ 0 /* cq_message_addr */);
}
for (index = 0; index < enic->wq_count; index++) {
@@ -311,22 +270,19 @@ void enic_init_vnic_resources(struct enic *enic)
enic_cq_wq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
- }
- vnic_dev_stats_clear(enic->vdev);
-
- for (index = 0; index < enic->cq_count; index++) {
- vnic_cq_init(&enic->cq[index],
+ cq_idx = enic_cq_wq(enic, index);
+ vnic_cq_init(&enic->cq[cq_idx],
0 /* flow_control_enable */,
1 /* color_enable */,
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
0 /* interrupt_enable */,
- 1 /* cq_entry_enable */,
- 0 /* cq_message_enable */,
+ 0 /* cq_entry_enable */,
+ 1 /* cq_message_enable */,
0 /* interrupt offset */,
- 0 /* cq_message_addr */);
+ (u64)enic->wq[index].cqmsg_rz->phys_addr);
}
vnic_intr_init(&enic->intr,
@@ -344,30 +300,35 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
unsigned i;
dma_addr_t dma_addr;
+ if (!rq->in_use)
+ return 0;
+
dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
rq->ring.desc_count);
for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
- mb = rte_rxmbuf_alloc(rq->mp);
+ mb = rte_mbuf_raw_alloc(rq->mp);
if (mb == NULL) {
dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
(unsigned)rq->index);
return -ENOMEM;
}
- dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off);
-
- rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
- mb->buf_len);
+ dma_addr = (dma_addr_t)(mb->buf_physaddr
+ + RTE_PKTMBUF_HEADROOM);
+ rq_enet_desc_enc(rqd, dma_addr,
+ (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+ : RQ_ENET_TYPE_NOT_SOP),
+ mb->buf_len - RTE_PKTMBUF_HEADROOM);
rq->mbuf_ring[i] = mb;
}
/* make sure all prior writes are complete before doing the PIO write */
rte_rmb();
- /* Post all but the last 2 cache lines' worth of descriptors */
- rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
- / sizeof(struct rq_enet_desc));
+ /* Post all but the last buffer to VIC. */
+ rq->posted_index = rq->ring.desc_count - 1;
+
rq->rx_nb_hold = 0;
dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
@@ -380,12 +341,14 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
}
static void *
-enic_alloc_consistent(__rte_unused void *priv, size_t size,
+enic_alloc_consistent(void *priv, size_t size,
dma_addr_t *dma_handle, u8 *name)
{
void *vaddr;
const struct rte_memzone *rz;
*dma_handle = 0;
+ struct enic *enic = (struct enic *)priv;
+ struct enic_memzone_entry *mze;
rz = rte_memzone_reserve_aligned((const char *)name,
size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
@@ -398,16 +361,49 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size,
vaddr = rz->addr;
*dma_handle = (dma_addr_t)rz->phys_addr;
+ mze = rte_malloc("enic memzone entry",
+ sizeof(struct enic_memzone_entry), 0);
+
+ if (!mze) {
+ pr_err("%s : Failed to allocate memory for memzone list\n",
+ __func__);
+		rte_memzone_free(rz);
+		return NULL;
+	}
+
+ mze->rz = rz;
+
+ rte_spinlock_lock(&enic->memzone_list_lock);
+ LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
+ rte_spinlock_unlock(&enic->memzone_list_lock);
+
return vaddr;
}
static void
-enic_free_consistent(__rte_unused struct rte_pci_device *hwdev,
- __rte_unused size_t size,
- __rte_unused void *vaddr,
- __rte_unused dma_addr_t dma_handle)
+enic_free_consistent(void *priv,
+ __rte_unused size_t size,
+ void *vaddr,
+ dma_addr_t dma_handle)
{
- /* Nothing to be done */
+ struct enic_memzone_entry *mze;
+ struct enic *enic = (struct enic *)priv;
+
+ rte_spinlock_lock(&enic->memzone_list_lock);
+ LIST_FOREACH(mze, &enic->memzone_list, entries) {
+ if (mze->rz->addr == vaddr &&
+ mze->rz->phys_addr == dma_handle)
+ break;
+ }
+ if (mze == NULL) {
+ rte_spinlock_unlock(&enic->memzone_list_lock);
+ dev_warning(enic,
+ "Tried to free memory, but couldn't find it in the memzone list\n");
+ return;
+ }
+ LIST_REMOVE(mze, entries);
+ rte_spinlock_unlock(&enic->memzone_list_lock);
+ rte_memzone_free(mze->rz);
+ rte_free(mze);
}
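
The alloc/free pair above keeps a spinlock-protected sys/queue.h list so
that a later free can map a (vaddr, dma_handle) pair back to its
memzone. A standalone toy version of that bookkeeping pattern (names are
illustrative, not from the driver):

    #include <stdlib.h>
    #include <sys/queue.h>

    struct mz_entry {
        void *vaddr;
        LIST_ENTRY(mz_entry) entries;
    };

    LIST_HEAD(mz_list, mz_entry);

    int main(void)
    {
        struct mz_list head = LIST_HEAD_INITIALIZER(head);
        struct mz_entry *e = malloc(sizeof(*e)), *it;

        e->vaddr = (void *)0x1000;
        LIST_INSERT_HEAD(&head, e, entries);    /* track on alloc */
        LIST_FOREACH(it, &head, entries)        /* look up on free */
            if (it->vaddr == (void *)0x1000)
                break;
        LIST_REMOVE(it, entries);
        free(it);
        return 0;
    }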
static void
@@ -436,17 +432,28 @@ int enic_enable(struct enic *enic)
"Flow director feature will not work\n");
for (index = 0; index < enic->rq_count; index++) {
- err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
+ err = enic_alloc_rx_queue_mbufs(enic,
+ &enic->rq[enic_sop_rq(index)]);
+ if (err) {
+ dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
+ return err;
+ }
+ err = enic_alloc_rx_queue_mbufs(enic,
+ &enic->rq[enic_data_rq(index)]);
if (err) {
- dev_err(enic, "Failed to alloc RX queue mbufs\n");
+			/* release the allocated mbufs for the sop rq */
+ enic_rxmbuf_queue_release(enic,
+ &enic->rq[enic_sop_rq(index)]);
+
+ dev_err(enic, "Failed to alloc data RX queue mbufs\n");
return err;
}
}
for (index = 0; index < enic->wq_count; index++)
- vnic_wq_enable(&enic->wq[index]);
+ enic_start_wq(enic, index);
for (index = 0; index < enic->rq_count; index++)
- vnic_rq_enable(&enic->rq[index]);
+ enic_start_rq(enic, index);
vnic_dev_enable_wait(enic->vdev);
@@ -466,7 +473,7 @@ int enic_alloc_intr_resources(struct enic *enic)
dev_info(enic, "vNIC resources used: "\
"wq %d rq %d cq %d intr %d\n",
- enic->wq_count, enic->rq_count,
+ enic->wq_count, enic_vnic_rq_count(enic),
enic->cq_count, enic->intr_count);
err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
@@ -478,14 +485,32 @@ int enic_alloc_intr_resources(struct enic *enic)
void enic_free_rq(void *rxq)
{
- struct vnic_rq *rq = (struct vnic_rq *)rxq;
- struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct vnic_rq *rq_sop, *rq_data;
+ struct enic *enic;
+
+ if (rxq == NULL)
+ return;
+
+ rq_sop = (struct vnic_rq *)rxq;
+ enic = vnic_dev_priv(rq_sop->vdev);
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+ enic_rxmbuf_queue_release(enic, rq_sop);
+ if (rq_data->in_use)
+ enic_rxmbuf_queue_release(enic, rq_data);
+
+ rte_free(rq_sop->mbuf_ring);
+ if (rq_data->in_use)
+ rte_free(rq_data->mbuf_ring);
+
+ rq_sop->mbuf_ring = NULL;
+ rq_data->mbuf_ring = NULL;
- enic_rxmbuf_queue_release(enic, rq);
- rte_free(rq->mbuf_ring);
- rq->mbuf_ring = NULL;
- vnic_rq_free(rq);
- vnic_cq_free(&enic->cq[rq->index]);
+ vnic_rq_free(rq_sop);
+ if (rq_data->in_use)
+ vnic_rq_free(rq_data);
+
+ vnic_cq_free(&enic->cq[rq_sop->index]);
}
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -500,12 +525,32 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
- vnic_rq_enable(&enic->rq[queue_idx]);
+ struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
+ struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+ if (rq_data->in_use)
+ vnic_rq_enable(rq_data);
+ rte_mb();
+ vnic_rq_enable(rq_sop);
}
int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
- return vnic_rq_disable(&enic->rq[queue_idx]);
+ int ret1 = 0, ret2 = 0;
+
+ struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
+ struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+ ret2 = vnic_rq_disable(rq_sop);
+ rte_mb();
+ if (rq_data->in_use)
+ ret1 = vnic_rq_disable(rq_data);
+
+ if (ret2)
+ return ret2;
+ else
+ return ret1;
}
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
@@ -513,62 +558,156 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
uint16_t nb_desc)
{
int rc;
- struct vnic_rq *rq = &enic->rq[queue_idx];
+ uint16_t sop_queue_idx = enic_sop_rq(queue_idx);
+ uint16_t data_queue_idx = enic_data_rq(queue_idx);
+ struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
+ struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+ unsigned int mbuf_size, mbufs_per_pkt;
+ unsigned int nb_sop_desc, nb_data_desc;
+ uint16_t min_sop, max_sop, min_data, max_data;
+
+ rq_sop->is_sop = 1;
+ rq_sop->data_queue_idx = data_queue_idx;
+ rq_data->is_sop = 0;
+ rq_data->data_queue_idx = 0;
+ rq_sop->socket_id = socket_id;
+ rq_sop->mp = mp;
+ rq_data->socket_id = socket_id;
+ rq_data->mp = mp;
+ rq_sop->in_use = 1;
+
+ mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
+ RTE_PKTMBUF_HEADROOM);
+
+ if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+ dev_info(enic, "Scatter rx mode enabled\n");
+ /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
+ mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) +
+ (mbuf_size - 1)) / mbuf_size;
+ } else {
+ dev_info(enic, "Scatter rx mode disabled\n");
+ mbufs_per_pkt = 1;
+ }
- rq->socket_id = socket_id;
- rq->mp = mp;
+ if (mbufs_per_pkt > 1) {
+ dev_info(enic, "Scatter rx mode in use\n");
+ rq_data->in_use = 1;
+ } else {
+ dev_info(enic, "Scatter rx mode not being used\n");
+ rq_data->in_use = 0;
+ }
- if (nb_desc) {
- if (nb_desc > enic->config.rq_desc_count) {
- dev_warning(enic,
- "RQ %d - number of rx desc in cmd line (%d)"\
- "is greater than that in the UCSM/CIMC adapter"\
- "policy. Applying the value in the adapter "\
- "policy (%d).\n",
- queue_idx, nb_desc, enic->config.rq_desc_count);
- nb_desc = enic->config.rq_desc_count;
- }
- dev_info(enic, "RX Queues - effective number of descs:%d\n",
- nb_desc);
+	/* the number of descriptors must be a multiple of 32 */
+ nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
+ nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+
+ rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
+ rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
+
+ if (mbufs_per_pkt > 1) {
+ min_sop = 64;
+ max_sop = ((enic->config.rq_desc_count /
+ (mbufs_per_pkt - 1)) & ~0x1F);
+ min_data = min_sop * (mbufs_per_pkt - 1);
+ max_data = enic->config.rq_desc_count;
+ } else {
+ min_sop = 64;
+ max_sop = enic->config.rq_desc_count;
+ min_data = 0;
+ max_data = 0;
}
- /* Allocate queue resources */
- rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
- nb_desc, sizeof(struct rq_enet_desc));
+ if (nb_desc < (min_sop + min_data)) {
+ dev_warning(enic,
+ "Number of rx descs too low, adjusting to minimum\n");
+ nb_sop_desc = min_sop;
+ nb_data_desc = min_data;
+ } else if (nb_desc > (max_sop + max_data)) {
+ dev_warning(enic,
+ "Number of rx_descs too high, adjusting to maximum\n");
+ nb_sop_desc = max_sop;
+ nb_data_desc = max_data;
+ }
+ if (mbufs_per_pkt > 1) {
+ dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
+ enic->config.mtu, mbuf_size, min_sop + min_data,
+ max_sop + max_data);
+ }
+ dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
+ nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
+
+ /* Allocate sop queue resources */
+ rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
+ nb_sop_desc, sizeof(struct rq_enet_desc));
if (rc) {
- dev_err(enic, "error in allocation of rq\n");
+ dev_err(enic, "error in allocation of sop rq\n");
goto err_exit;
}
-
+ nb_sop_desc = rq_sop->ring.desc_count;
+
+ if (rq_data->in_use) {
+ /* Allocate data queue resources */
+ rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
+ nb_data_desc,
+ sizeof(struct rq_enet_desc));
+ if (rc) {
+ dev_err(enic, "error in allocation of data rq\n");
+ goto err_free_rq_sop;
+ }
+ nb_data_desc = rq_data->ring.desc_count;
+ }
rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
- socket_id, nb_desc,
- sizeof(struct cq_enet_rq_desc));
+ socket_id, nb_sop_desc + nb_data_desc,
+ sizeof(struct cq_enet_rq_desc));
if (rc) {
dev_err(enic, "error in allocation of cq for rq\n");
- goto err_free_rq_exit;
+ goto err_free_rq_data;
}
- /* Allocate the mbuf ring */
- rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
- sizeof(struct rte_mbuf *) * nb_desc,
- RTE_CACHE_LINE_SIZE, rq->socket_id);
+ /* Allocate the mbuf rings */
+ rq_sop->mbuf_ring = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->mbuf_ring",
+ sizeof(struct rte_mbuf *) * nb_sop_desc,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_sop->mbuf_ring == NULL)
+ goto err_free_cq;
+
+ if (rq_data->in_use) {
+ rq_data->mbuf_ring = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->mbuf_ring",
+ sizeof(struct rte_mbuf *) * nb_data_desc,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_data->mbuf_ring == NULL)
+ goto err_free_sop_mbuf;
+ }
- if (rq->mbuf_ring != NULL)
- return 0;
+ return 0;
+err_free_sop_mbuf:
+ rte_free(rq_sop->mbuf_ring);
+err_free_cq:
/* cleanup on error */
vnic_cq_free(&enic->cq[queue_idx]);
-err_free_rq_exit:
- vnic_rq_free(rq);
+err_free_rq_data:
+ if (rq_data->in_use)
+ vnic_rq_free(rq_data);
+err_free_rq_sop:
+ vnic_rq_free(rq_sop);
err_exit:
return -ENOMEM;
}
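
A worked example of the descriptor split above, using hypothetical
values: mtu = 9000, a mempool whose data room leaves 1920 bytes per mbuf
after the headroom, and nb_desc = 512 requested by the application.

    #include <stdio.h>

    int main(void)
    {
        unsigned int mtu = 9000, mbuf_size = 1920, nb_desc = 512;
        /* ceil((mtu + ETHER_HDR_LEN + 4) / mbuf_size) */
        unsigned int mbufs_per_pkt =
            (mtu + 14 + 4 + mbuf_size - 1) / mbuf_size;
        unsigned int nb_sop = (nb_desc / mbufs_per_pkt) & ~0x1Fu;
        unsigned int nb_data = (nb_desc - nb_sop) & ~0x1Fu;

        /* prints "mbufs_per_pkt=5 sop=96 data=416" */
        printf("mbufs_per_pkt=%u sop=%u data=%u\n",
               mbufs_per_pkt, nb_sop, nb_data);
        return 0;
    }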
void enic_free_wq(void *txq)
{
- struct vnic_wq *wq = (struct vnic_wq *)txq;
- struct enic *enic = vnic_dev_priv(wq->vdev);
+ struct vnic_wq *wq;
+ struct enic *enic;
+
+ if (txq == NULL)
+ return;
+ wq = (struct vnic_wq *)txq;
+ enic = vnic_dev_priv(wq->vdev);
+ rte_memzone_free(wq->cqmsg_rz);
vnic_wq_free(wq);
vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}
@@ -579,6 +718,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
int err;
struct vnic_wq *wq = &enic->wq[queue_idx];
unsigned int cq_index = enic_cq_wq(enic, queue_idx);
+ char name[NAME_MAX];
+ static int instance;
wq->socket_id = socket_id;
if (nb_desc) {
@@ -614,6 +755,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
dev_err(enic, "error in allocation of cq for wq\n");
}
+	/* set up the CQ message */
+ snprintf((char *)name, sizeof(name),
+ "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
+ instance++);
+
+ wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
+ sizeof(uint32_t),
+ SOCKET_ID_ANY, 0,
+ ENIC_ALIGN);
+ if (!wq->cqmsg_rz)
+ return -ENOMEM;
+
return err;
}
@@ -637,10 +790,12 @@ int enic_disable(struct enic *enic)
if (err)
return err;
}
- for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_disable(&enic->rq[i]);
- if (err)
- return err;
+ for (i = 0; i < enic_vnic_rq_count(enic); i++) {
+ if (enic->rq[i].in_use) {
+ err = vnic_rq_disable(&enic->rq[i]);
+ if (err)
+ return err;
+ }
}
vnic_dev_set_reset_flag(enic->vdev, 1);
@@ -649,8 +804,9 @@ int enic_disable(struct enic *enic)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ for (i = 0; i < enic_vnic_rq_count(enic); i++)
+ if (enic->rq[i].in_use)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
vnic_intr_clean(&enic->intr);
@@ -723,7 +879,7 @@ static int enic_set_rsskey(struct enic *enic)
rss_key_buf_pa,
sizeof(union vnic_rss_key));
- enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
+ enic_free_consistent(enic, sizeof(union vnic_rss_key),
rss_key_buf_va, rss_key_buf_pa);
return err;
@@ -744,13 +900,14 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
return -ENOMEM;
for (i = 0; i < (1 << rss_hash_bits); i++)
- (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
+ (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
+ enic_sop_rq(i % enic->rq_count);
err = enic_set_rss_cpu(enic,
rss_cpu_buf_pa,
sizeof(union vnic_rss_cpu));
- enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
+ enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
rss_cpu_buf_va, rss_cpu_buf_pa);
return err;
@@ -806,6 +963,8 @@ int enic_setup_finish(struct enic *enic)
{
int ret;
+ enic_init_soft_stats(enic);
+
ret = enic_set_rss_nic_cfg(enic);
if (ret) {
dev_err(enic, "Failed to config nic, aborting.\n");
@@ -851,21 +1010,79 @@ static void enic_dev_deinit(struct enic *enic)
int enic_set_vnic_res(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
+ int rc = 0;
- if ((enic->rq_count < eth_dev->data->nb_rx_queues) ||
- (enic->wq_count < eth_dev->data->nb_tx_queues)) {
- dev_err(dev, "Not enough resources configured, aborting\n");
- return -1;
+ /* With Rx scatter support, two RQs are now used per RQ used by
+ * the application.
+ */
+ if (enic->rq_count < (eth_dev->data->nb_rx_queues * 2)) {
+ dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
+ eth_dev->data->nb_rx_queues,
+ eth_dev->data->nb_rx_queues * 2, enic->rq_count);
+ rc = -EINVAL;
+ }
+ if (enic->wq_count < eth_dev->data->nb_tx_queues) {
+ dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
+ eth_dev->data->nb_tx_queues, enic->wq_count);
+ rc = -EINVAL;
}
- enic->rq_count = eth_dev->data->nb_rx_queues;
- enic->wq_count = eth_dev->data->nb_tx_queues;
if (enic->cq_count < (enic->rq_count + enic->wq_count)) {
- dev_err(dev, "Not enough resources configured, aborting\n");
- return -1;
+ dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
+ enic->rq_count + enic->wq_count, enic->cq_count);
+ rc = -EINVAL;
+ }
+
+ if (rc == 0) {
+ enic->rq_count = eth_dev->data->nb_rx_queues;
+ enic->wq_count = eth_dev->data->nb_tx_queues;
+ enic->cq_count = enic->rq_count + enic->wq_count;
}
- enic->cq_count = enic->rq_count + enic->wq_count;
+ return rc;
+}
+
+/* The Cisco NIC can send and receive packets up to a max packet size
+ * determined by the NIC type and firmware. There is also an MTU
+ * configured into the NIC via the CIMC/UCSM management interface,
+ * which can be overridden by this function (up to the max packet size).
+ * Depending on the network setup, doing so may cause packet drops
+ * and unexpected behavior.
+ */
+int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
+{
+ uint16_t old_mtu; /* previous setting */
+ uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+
+ old_mtu = eth_dev->data->mtu;
+ config_mtu = enic->config.mtu;
+
+ /* only works with Rx scatter disabled */
+ if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter)
+ return -ENOTSUP;
+
+ if (new_mtu > enic->max_mtu) {
+ dev_err(enic,
+ "MTU not updated: requested (%u) greater than max (%u)\n",
+ new_mtu, enic->max_mtu);
+ return -EINVAL;
+ }
+ if (new_mtu < ENIC_MIN_MTU) {
+ dev_info(enic,
+ "MTU not updated: requested (%u) less than min (%u)\n",
+ new_mtu, ENIC_MIN_MTU);
+ return -EINVAL;
+ }
+ if (new_mtu > config_mtu)
+ dev_warning(enic,
+ "MTU (%u) is greater than value configured in NIC (%u)\n",
+ new_mtu, config_mtu);
+
+ /* update the mtu */
+ eth_dev->data->mtu = new_mtu;
+
+ dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
return 0;
}
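
With .mtu_set wired to enicpmd_mtu_set, the new code path is reachable
through the standard ethdev API. A hedged usage sketch (port id is
hypothetical; this DPDK release used a uint8_t port id):

    #include <rte_ethdev.h>

    static int example_set_mtu(uint8_t port_id)
    {
        /* returns -ENOTSUP while Rx scatter is enabled, -EINVAL when
         * the value is outside [ENIC_MIN_MTU, enic->max_mtu] */
        return rte_eth_dev_set_mtu(port_id, 1500);
    }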
@@ -920,6 +1137,9 @@ int enic_probe(struct enic *enic)
goto err_out;
}
+ LIST_INIT(&enic->memzone_list);
+ rte_spinlock_init(&enic->memzone_list_lock);
+
vnic_register_cbacks(enic->vdev,
enic_alloc_consistent,
enic_free_consistent);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index ebe379dd..b271d340 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -83,6 +83,20 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_usec);
GET_CONFIG(loop_tag);
GET_CONFIG(num_arfs);
+ GET_CONFIG(max_pkt_size);
+
+ /* max packet size is only defined in newer VIC firmware
+ * and will be 0 for legacy firmware and VICs
+ */
+ if (c->max_pkt_size > ENIC_DEFAULT_MAX_PKT_SIZE)
+ enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
+ else
+ enic->max_mtu = ENIC_DEFAULT_MAX_PKT_SIZE - (ETHER_HDR_LEN + 4);
+ if (c->mtu == 0)
+ c->mtu = 1500;
+
+ enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
+ max_t(u16, ENIC_MIN_MTU, c->mtu));
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -96,21 +110,16 @@ int enic_get_vnic_config(struct enic *enic)
c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
- if (c->mtu == 0)
- c->mtu = 1500;
- c->mtu = min_t(u16, ENIC_MAX_MTU,
- max_t(u16, ENIC_MIN_MTU,
- c->mtu));
-
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
"vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
- "wq/rq %d/%d mtu %d\n",
+ "wq/rq %d/%d mtu %d, max mtu:%d\n",
enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
- c->wq_desc_count, c->rq_desc_count, c->mtu);
+ c->wq_desc_count, c->rq_desc_count,
+ enic->rte_dev->data->mtu, enic->max_mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"rss %s intr mode %s type %s timer %d usec "
"loopback tag 0x%04x\n",
@@ -196,8 +205,9 @@ void enic_free_vnic_resources(struct enic *enic)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_free(&enic->wq[i]);
- for (i = 0; i < enic->rq_count; i++)
- vnic_rq_free(&enic->rq[i]);
+ for (i = 0; i < enic_vnic_rq_count(enic); i++)
+ if (enic->rq[i].in_use)
+ vnic_rq_free(&enic->rq[i]);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
vnic_intr_free(&enic->intr);
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 00fa71de..303530ef 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -46,95 +46,19 @@
#define ENIC_MAX_RQ_DESCS 4096
#define ENIC_MIN_MTU 68
-#define ENIC_MAX_MTU 9000
+
+/* Does not include (possible) inserted VLAN tag and FCS */
+#define ENIC_DEFAULT_MAX_PKT_SIZE 9022
#define ENIC_MULTICAST_PERFECT_FILTERS 32
#define ENIC_UNICAST_PERFECT_FILTERS 32
#define ENIC_NON_TSO_MAX_DESC 16
#define ENIC_DEFAULT_RX_FREE_THRESH 32
-#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2)
+#define ENIC_TX_XMIT_MAX 64
#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
-static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
- void *os_buf, dma_addr_t dma_addr, unsigned int len,
- unsigned int mss_or_csum_offset, unsigned int hdr_len,
- int vlan_tag_insert, unsigned int vlan_tag,
- int offload_mode, int cq_entry, int sop, int eop, int loopback)
-{
- struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
- u8 desc_skip_cnt = 1;
- u8 compressed_send = 0;
- u64 wrid = 0;
-
- wq_enet_desc_enc(desc,
- (u64)dma_addr | VNIC_PADDR_TARGET,
- (u16)len,
- (u16)mss_or_csum_offset,
- (u16)hdr_len, (u8)offload_mode,
- (u8)eop, (u8)cq_entry,
- 0, /* fcoe_encap */
- (u8)vlan_tag_insert,
- (u16)vlan_tag,
- (u8)loopback);
-
- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
- (u8)cq_entry, compressed_send, wrid);
-}
-
-static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
- void *os_buf, dma_addr_t dma_addr, unsigned int len,
- int eop, int loopback)
-{
- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
- 0, 0, 0, 0, 0,
- eop, 0 /* !SOP */, eop, loopback);
-}
-
-static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
- dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
- unsigned int vlan_tag, int eop, int loopback)
-{
- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
- 0, 0, vlan_tag_insert, vlan_tag,
- WQ_ENET_OFFLOAD_MODE_CSUM,
- eop, 1 /* SOP */, eop, loopback);
-}
-
-static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
- void *os_buf, dma_addr_t dma_addr, unsigned int len,
- int ip_csum, int tcpudp_csum, int vlan_tag_insert,
- unsigned int vlan_tag, int eop, int loopback)
-{
- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
- (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
- 0, vlan_tag_insert, vlan_tag,
- WQ_ENET_OFFLOAD_MODE_CSUM,
- eop, 1 /* SOP */, eop, loopback);
-}
-
-static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
- void *os_buf, dma_addr_t dma_addr, unsigned int len,
- unsigned int csum_offset, unsigned int hdr_len,
- int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback)
-{
- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
- csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
- WQ_ENET_OFFLOAD_MODE_CSUM_L4,
- eop, 1 /* SOP */, eop, loopback);
-}
-
-static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
- void *os_buf, dma_addr_t dma_addr, unsigned int len,
- unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
- unsigned int vlan_tag, int eop, int loopback)
-{
- enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
- mss, hdr_len, vlan_tag_insert, vlan_tag,
- WQ_ENET_OFFLOAD_MODE_TSO,
- eop, 1 /* SOP */, eop, loopback);
-}
struct enic;
diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
deleted file mode 100644
index 232987a5..00000000
--- a/drivers/net/enic/enic_rx.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <rte_mbuf.h>
-#include <rte_ethdev.h>
-#include <rte_prefetch.h>
-
-#include "enic_compat.h"
-#include "rq_enet_desc.h"
-#include "enic.h"
-
-#define RTE_PMD_USE_PREFETCH
-
-#ifdef RTE_PMD_USE_PREFETCH
-/*
- * Prefetch a cache line into all cache levels.
- */
-#define rte_enic_prefetch(p) rte_prefetch0(p)
-#else
-#define rte_enic_prefetch(p) do {} while (0)
-#endif
-
-#ifdef RTE_PMD_PACKET_PREFETCH
-#define rte_packet_prefetch(p) rte_prefetch1(p)
-#else
-#define rte_packet_prefetch(p) do {} while (0)
-#endif
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
-static inline uint16_t
-enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-{
- return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-}
-
-static inline uint16_t
-enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-{
- return(le16_to_cpu(crd->bytes_written_flags) &
- ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_packet_error(uint16_t bwflags)
-{
- return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_eop(uint16_t ciflags)
-{
- return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
- == CQ_ENET_RQ_DESC_FLAGS_EOP;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-{
- return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
- CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
- return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
- CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
-}
-
-static inline uint8_t
-enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-{
- return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
- CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-}
-
-static inline uint32_t
-enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-{
- return le32_to_cpu(cqrd->rss_hash);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-{
- return le16_to_cpu(cqrd->vlan);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-{
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- return le16_to_cpu(cqrd->bytes_written_flags) &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-static inline uint8_t
-enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
-{
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t bwflags;
- int ret = 0;
- uint64_t pkt_err_flags = 0;
-
- bwflags = enic_cq_rx_desc_bwflags(cqrd);
- if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
- pkt_err_flags = PKT_RX_MAC_ERR;
- ret = 1;
- }
- *pkt_err_flags_out = pkt_err_flags;
- return ret;
-}
-
-/*
- * Lookup table to translate RX CQ flags to mbuf flags.
- */
-static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
-{
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint8_t cqrd_flags = cqrd->flags;
- static const uint32_t cq_type_table[128] __rte_cache_aligned = {
- [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
- | RTE_PTYPE_L4_UDP,
- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
- | RTE_PTYPE_L4_TCP,
- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
- | RTE_PTYPE_L4_FRAG,
- [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
- [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
- | RTE_PTYPE_L4_UDP,
- [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
- | RTE_PTYPE_L4_TCP,
- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
- | RTE_PTYPE_L4_FRAG,
- /* All others reserved */
- };
- cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
- | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
- | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
- return cq_type_table[cqrd_flags];
-}
-
-static inline void
-enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-{
- struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t ciflags, bwflags, pkt_flags = 0;
- ciflags = enic_cq_rx_desc_ciflags(cqrd);
- bwflags = enic_cq_rx_desc_bwflags(cqrd);
-
- mbuf->ol_flags = 0;
-
- /* flags are meaningless if !EOP */
- if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
- goto mbuf_flags_done;
-
- /* VLAN stripping */
- if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
- pkt_flags |= PKT_RX_VLAN_PKT;
- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
- } else {
- mbuf->vlan_tci = 0;
- }
-
- /* RSS flag */
- if (enic_cq_rx_desc_rss_type(cqrd)) {
- pkt_flags |= PKT_RX_RSS_HASH;
- mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
- }
-
- /* checksum flags */
- if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
- if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
- }
- }
-
- mbuf_flags_done:
- mbuf->ol_flags = pkt_flags;
-}
-
-static inline uint32_t
-enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-{
- uint32_t d = i0 + i1;
- ASSERT(i0 < n_descriptors);
- ASSERT(i1 < n_descriptors);
- d -= (d >= n_descriptors) ? n_descriptors : 0;
- return d;
-}
-
-
-uint16_t
-enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
-{
- struct vnic_rq *rq = rx_queue;
- struct enic *enic = vnic_dev_priv(rq->vdev);
- unsigned int rx_id;
- struct rte_mbuf *nmb, *rxmb;
- uint16_t nb_rx = 0;
- uint16_t nb_hold;
- struct vnic_cq *cq;
- volatile struct cq_desc *cqd_ptr;
- uint8_t color;
-
- cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
-
- nb_hold = rq->rx_nb_hold; /* mbufs held by software */
-
- while (nb_rx < nb_pkts) {
- volatile struct rq_enet_desc *rqd_ptr;
- dma_addr_t dma_addr;
- struct cq_desc cqd;
- uint64_t ol_err_flags;
- uint8_t packet_error;
-
- /* Check for pkts available */
- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
- & CQ_DESC_COLOR_MASK;
- if (color == cq->last_color)
- break;
-
- /* Get the cq descriptor and rq pointer */
- cqd = *cqd_ptr;
- rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
-
- /* allocate a new mbuf */
- nmb = rte_rxmbuf_alloc(rq->mp);
- if (nmb == NULL) {
- dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
- enic->port_id, (unsigned)rq->index);
- rte_eth_devices[enic->port_id].
- data->rx_mbuf_alloc_failed++;
- break;
- }
-
- /* A packet error means descriptor and data are untrusted */
- packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
-
- /* Get the mbuf to return and replace with one just allocated */
- rxmb = rq->mbuf_ring[rx_id];
- rq->mbuf_ring[rx_id] = nmb;
-
- /* Increment cqd, rqd, mbuf_table index */
- rx_id++;
- if (unlikely(rx_id == rq->ring.desc_count)) {
- rx_id = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- /* Prefetch next mbuf & desc while processing current one */
- cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
- rte_enic_prefetch(cqd_ptr);
- rte_enic_prefetch(rq->mbuf_ring[rx_id]);
- rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
- + rx_id);
-
- /* Push descriptor for newly allocated mbuf */
- dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
- rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);
-
- /* Fill in the rest of the mbuf */
- rxmb->data_off = RTE_PKTMBUF_HEADROOM;
- rxmb->nb_segs = 1;
- rxmb->next = NULL;
- rxmb->port = enic->port_id;
- if (!packet_error) {
- rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
- enic_cq_rx_to_pkt_flags(&cqd, rxmb);
- } else {
- rxmb->pkt_len = 0;
- rxmb->packet_type = 0;
- rxmb->ol_flags = 0;
- }
- rxmb->data_len = rxmb->pkt_len;
-
- /* prefetch mbuf data for caller */
- rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
- RTE_PKTMBUF_HEADROOM));
-
- /* store the mbuf address into the next entry of the array */
- rx_pkts[nb_rx++] = rxmb;
- }
-
- nb_hold += nb_rx;
- cq->to_clean = rx_id;
-
- if (nb_hold > rq->rx_free_thresh) {
- rq->posted_index = enic_ring_add(rq->ring.desc_count,
- rq->posted_index, nb_hold);
- nb_hold = 0;
- rte_mb();
- iowrite32(rq->posted_index, &rq->ctrl->posted_index);
- }
-
- rq->rx_nb_hold = nb_hold;
-
- return nb_rx;
-}
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
new file mode 100644
index 00000000..5ac1d69c
--- /dev/null
+++ b/drivers/net/enic/enic_rxtx.c
@@ -0,0 +1,547 @@
+/* Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+
+#include "enic_compat.h"
+#include "rq_enet_desc.h"
+#include "enic.h"
+
+#define RTE_PMD_USE_PREFETCH
+
+#ifdef RTE_PMD_USE_PREFETCH
+/* Prefetch a cache line into all cache levels. */
+#define rte_enic_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_enic_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+static inline uint16_t
+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
+{
+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline uint16_t
+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
+{
+ return le16_to_cpu(crd->bytes_written_flags) &
+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_packet_error(uint16_t bwflags)
+{
+ return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_eop(uint16_t ciflags)
+{
+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
+ == CQ_ENET_RQ_DESC_FLAGS_EOP;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
+{
+ return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
+{
+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+}
+
+static inline uint32_t
+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
+{
+ return le32_to_cpu(cqrd->rss_hash);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
+{
+ return le16_to_cpu(cqrd->vlan);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ return le16_to_cpu(cqrd->bytes_written_flags) &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+static inline uint8_t
+enic_cq_rx_check_err(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint16_t bwflags;
+
+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
+ return 1;
+ return 0;
+}
+
+/* Lookup table to translate Rx CQ flags to mbuf packet types. */
+static inline uint32_t
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint8_t cqrd_flags = cqrd->flags;
+ static const uint32_t cq_type_table[128] __rte_cache_aligned = {
+ [0x00] = RTE_PTYPE_UNKNOWN,
+ [0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_NONFRAG,
+ [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_UDP,
+ [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_TCP,
+ [0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_FRAG,
+ [0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_UDP,
+ [0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
+ | RTE_PTYPE_L4_TCP,
+ [0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_NONFRAG,
+ [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_UDP,
+ [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_TCP,
+ [0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_FRAG,
+ [0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_UDP,
+ [0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
+ | RTE_PTYPE_L4_TCP,
+ /* All others reserved */
+ };
+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
+ return cq_type_table[cqrd_flags];
+}
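
The table above is indexed directly by the masked CQ flags byte. The bit
values implied by the populated entries (an inference from this table,
not taken from a hardware spec) are UDP = 0x02, TCP = 0x04, IPv6 = 0x10,
IPv4 = 0x20 and IPv4 fragment = 0x40, so an IPv4/TCP completion indexes
entry [0x24]:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t ipv4 = 0x20, tcp = 0x04;  /* inferred bit values */

        /* selects RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
         * | RTE_PTYPE_L4_TCP in the table above */
        assert((ipv4 | tcp) == 0x24);
        return 0;
    }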
+
+static inline void
+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint16_t ciflags, bwflags, pkt_flags = 0;
+ ciflags = enic_cq_rx_desc_ciflags(cqrd);
+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
+
+ mbuf->ol_flags = 0;
+
+ /* flags are meaningless if !EOP */
+ if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
+ goto mbuf_flags_done;
+
+ /* VLAN stripping */
+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
+ pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+ } else {
+ mbuf->vlan_tci = 0;
+ }
+
+ /* RSS flag */
+ if (enic_cq_rx_desc_rss_type(cqrd)) {
+ pkt_flags |= PKT_RX_RSS_HASH;
+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
+ }
+
+ /* checksum flags */
+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+
+ mbuf_flags_done:
+ mbuf->ol_flags = pkt_flags;
+}
+
+uint16_t
+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct vnic_rq *sop_rq = rx_queue;
+ struct vnic_rq *data_rq;
+ struct vnic_rq *rq;
+ struct enic *enic = vnic_dev_priv(sop_rq->vdev);
+ uint16_t cq_idx;
+ uint16_t rq_idx;
+ uint16_t rq_num;
+ struct rte_mbuf *nmb, *rxmb;
+ uint16_t nb_rx = 0;
+ struct vnic_cq *cq;
+ volatile struct cq_desc *cqd_ptr;
+ uint8_t color;
+ uint16_t seg_length;
+ struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
+ struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
+
+ cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
+ cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */
+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+
+ data_rq = &enic->rq[sop_rq->data_queue_idx];
+
+ while (nb_rx < nb_pkts) {
+ volatile struct rq_enet_desc *rqd_ptr;
+ dma_addr_t dma_addr;
+ struct cq_desc cqd;
+ uint8_t packet_error;
+ uint16_t ciflags;
+
+ /* Check for pkts available */
+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
+ & CQ_DESC_COLOR_MASK;
+ if (color == cq->last_color)
+ break;
+
+ /* Get the cq descriptor and extract rq info from it */
+ cqd = *cqd_ptr;
+ rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
+ rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
+
+ rq = &enic->rq[rq_num];
+ rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
+
+ /* allocate a new mbuf */
+ nmb = rte_mbuf_raw_alloc(rq->mp);
+ if (nmb == NULL) {
+ rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
+ break;
+ }
+
+ /* A packet error means descriptor and data are untrusted */
+ packet_error = enic_cq_rx_check_err(&cqd);
+
+ /* Get the mbuf to return and replace with one just allocated */
+ rxmb = rq->mbuf_ring[rq_idx];
+ rq->mbuf_ring[rq_idx] = nmb;
+
+ /* Increment cqd, rqd, mbuf_table index */
+ cq_idx++;
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ /* Prefetch next mbuf & desc while processing current one */
+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+ rte_enic_prefetch(cqd_ptr);
+
+ ciflags = enic_cq_rx_desc_ciflags(
+ (struct cq_enet_rq_desc *)&cqd);
+
+ /* Push descriptor for newly allocated mbuf */
+ dma_addr = (dma_addr_t)(nmb->buf_physaddr +
+ RTE_PKTMBUF_HEADROOM);
+ rq_enet_desc_enc(rqd_ptr, dma_addr,
+ (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+ : RQ_ENET_TYPE_NOT_SOP),
+ nmb->buf_len - RTE_PKTMBUF_HEADROOM);
+
+ /* Fill in the rest of the mbuf */
+ seg_length = enic_cq_rx_desc_n_bytes(&cqd);
+
+ if (rq->is_sop) {
+ first_seg = rxmb;
+ first_seg->nb_segs = 1;
+ first_seg->pkt_len = seg_length;
+ } else {
+ first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
+ + seg_length);
+ first_seg->nb_segs++;
+ last_seg->next = rxmb;
+ }
+
+ rxmb->next = NULL;
+ rxmb->port = enic->port_id;
+ rxmb->data_len = seg_length;
+
+ rq->rx_nb_hold++;
+
+ if (!(enic_cq_rx_desc_eop(ciflags))) {
+ last_seg = rxmb;
+ continue;
+ }
+
+ /* cq rx flags are only valid if eop bit is set */
+ first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+ enic_cq_rx_to_pkt_flags(&cqd, first_seg);
+
+ if (unlikely(packet_error)) {
+ rte_pktmbuf_free(first_seg);
+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+ continue;
+ }
+
+ /* prefetch mbuf data for caller */
+ rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
+ RTE_PKTMBUF_HEADROOM));
+
+ /* store the mbuf address into the next entry of the array */
+ rx_pkts[nb_rx++] = first_seg;
+ }
+
+ sop_rq->pkt_first_seg = first_seg;
+ sop_rq->pkt_last_seg = last_seg;
+
+ cq->to_clean = cq_idx;
+
+ if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
+ sop_rq->rx_free_thresh) {
+ if (data_rq->in_use) {
+ data_rq->posted_index =
+ enic_ring_add(data_rq->ring.desc_count,
+ data_rq->posted_index,
+ data_rq->rx_nb_hold);
+ data_rq->rx_nb_hold = 0;
+ }
+ sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
+ sop_rq->posted_index,
+ sop_rq->rx_nb_hold);
+ sop_rq->rx_nb_hold = 0;
+
+ rte_mb();
+ if (data_rq->in_use)
+ iowrite32(data_rq->posted_index,
+ &data_rq->ctrl->posted_index);
+ rte_compiler_barrier();
+ iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
+ }
+
+ return nb_rx;
+}
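
Because the segments of a scattered frame complete separately, the
receive function above carries pkt_first_seg/pkt_last_seg across bursts
and only hands a frame to the caller once the EOP flag is seen, so the
application receives one mbuf chain per frame. A small sketch of walking
such a chain (an illustrative helper, not part of the patch):

    #include <rte_mbuf.h>

    /* counts the segments in a chain returned by enic_recv_pkts();
     * the result matches nb_segs stored in the first segment */
    static unsigned int example_count_segs(struct rte_mbuf *m)
    {
        unsigned int n = 0;

        for (; m != NULL; m = m->next)
            n++;
        return n;
    }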
+
+static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+{
+ struct vnic_wq_buf *buf;
+ struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
+ unsigned int nb_to_free, nb_free = 0, i;
+ struct rte_mempool *pool;
+ unsigned int tail_idx;
+ unsigned int desc_count = wq->ring.desc_count;
+
+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+ + 1;
+ tail_idx = wq->tail_idx;
+ buf = &wq->bufs[tail_idx];
+ pool = ((struct rte_mbuf *)buf->mb)->pool;
+ for (i = 0; i < nb_to_free; i++) {
+ buf = &wq->bufs[tail_idx];
+ m = (struct rte_mbuf *)(buf->mb);
+ if (likely(m->pool == pool)) {
+ ENIC_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ pool = m->pool;
+ }
+ tail_idx = enic_ring_incr(desc_count, tail_idx);
+ buf->mb = NULL;
+ }
+
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
+
+ wq->tail_idx = tail_idx;
+ wq->ring.desc_avail += nb_to_free;
+}
+
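+/* The NIC DMA-writes its latest completed index into host memory
+ * (cqmsg_rz), so polling for Tx completions costs a cached memory
+ * read instead of a device register access.
+ */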
+unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
+{
+ u16 completed_index;
+
+ completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
+
+ if (wq->last_completed_index != completed_index) {
+ enic_free_wq_bufs(wq, completed_index);
+ wq->last_completed_index = completed_index;
+ }
+ return 0;
+}
+
+uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t index;
+ unsigned int pkt_len, data_len;
+ unsigned int nb_segs;
+ struct rte_mbuf *tx_pkt;
+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+ unsigned short vlan_id;
+ uint64_t ol_flags;
+ uint64_t ol_flags_mask;
+ unsigned int wq_desc_avail;
+ int head_idx;
+ struct vnic_wq_buf *buf;
+ unsigned int desc_count;
+ struct wq_enet_desc *descs, *desc_p, desc_tmp;
+ uint16_t mss;
+ uint8_t vlan_tag_insert;
+ uint8_t eop;
+ uint64_t bus_addr;
+
+ enic_cleanup_wq(enic, wq);
+ wq_desc_avail = vnic_wq_desc_avail(wq);
+ head_idx = wq->head_idx;
+ desc_count = wq->ring.desc_count;
+ ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+
+ nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
+
+ for (index = 0; index < nb_pkts; index++) {
+ tx_pkt = *tx_pkts++;
+ nb_segs = tx_pkt->nb_segs;
+ if (nb_segs > wq_desc_avail) {
+ if (index > 0)
+ goto post;
+ goto done;
+ }
+
+ pkt_len = tx_pkt->pkt_len;
+ data_len = tx_pkt->data_len;
+ ol_flags = tx_pkt->ol_flags;
+ mss = 0;
+ vlan_id = 0;
+ vlan_tag_insert = 0;
+ bus_addr = (dma_addr_t)
+ (tx_pkt->buf_physaddr + tx_pkt->data_off);
+
+ descs = (struct wq_enet_desc *)wq->ring.descs;
+ desc_p = descs + head_idx;
+
+ eop = (data_len == pkt_len);
+
+ if (ol_flags & ol_flags_mask) {
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ vlan_tag_insert = 1;
+ vlan_id = tx_pkt->vlan_tci;
+ }
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ mss |= ENIC_CALC_IP_CKSUM;
+
+ /* The NIC uses just 1 bit for both UDP and TCP */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ case PKT_TX_UDP_CKSUM:
+ mss |= ENIC_CALC_TCP_UDP_CKSUM;
+ break;
+ }
+ }
+
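+ /* eop is passed twice below: once as end-of-packet and once as
+  * cq_entry, so only the last descriptor of a packet requests a
+  * completion entry.
+  */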
+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
+ eop, 0, vlan_tag_insert, vlan_id, 0);
+
+ *desc_p = desc_tmp;
+ buf = &wq->bufs[head_idx];
+ buf->mb = (void *)tx_pkt;
+ head_idx = enic_ring_incr(desc_count, head_idx);
+ wq_desc_avail--;
+
+ if (!eop) {
+ for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
+ tx_pkt->next) {
+ data_len = tx_pkt->data_len;
+
+ if (tx_pkt->next == NULL)
+ eop = 1;
+ desc_p = descs + head_idx;
+ bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
+ + tx_pkt->data_off);
+ wq_enet_desc_enc((struct wq_enet_desc *)
+ &desc_tmp, bus_addr, data_len,
+ mss, 0, 0, eop, eop, 0,
+ vlan_tag_insert, vlan_id, 0);
+
+ *desc_p = desc_tmp;
+ buf = &wq->bufs[head_idx];
+ buf->mb = (void *)tx_pkt;
+ head_idx = enic_ring_incr(desc_count, head_idx);
+ wq_desc_avail--;
+ }
+ }
+ }
+ post:
+ rte_wmb();
+ iowrite32(head_idx, &wq->ctrl->posted_index);
+ done:
+ wq->ring.desc_avail = wq_desc_avail;
+ wq->head_idx = head_idx;
+
+ return index;
+}
diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile
index 602a2d2d..afcbd1d8 100644
--- a/drivers/net/fm10k/Makefile
+++ b/drivers/net/fm10k/Makefile
@@ -43,13 +43,14 @@ EXPORT_MAP := rte_pmd_fm10k_version.map
LIBABIVER := 1
-ifeq ($(CC), icc)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
-else ifeq ($(CC), clang)
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
# CFLAGS for clang
#
@@ -99,5 +100,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR) += fm10k_rxtx_vec.c
DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_net
+DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index c2d377f1..eb77705e 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -947,7 +947,7 @@ fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
/* Return if it didn't acquire valid glort range */
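+ /* Only the PF owns a glort range; VFs may proceed without one */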
- if (!fm10k_glort_valid(hw))
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
return;
fm10k_mbx_lock(hw);
@@ -969,7 +969,7 @@ fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
/* Return if it didn't acquire valid glort range */
- if (!fm10k_glort_valid(hw))
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
return;
if (dev->data->all_multicast == 1)
@@ -995,7 +995,7 @@ fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
/* Return if it didn't acquire valid glort range */
- if (!fm10k_glort_valid(hw))
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
return;
/* If promiscuous mode is enabled, it doesn't make sense to enable
@@ -1026,7 +1026,7 @@ fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
/* Return if it didn't acquire valid glort range */
- if (!fm10k_glort_valid(hw))
+ if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
return;
if (dev->data->promiscuous) {
@@ -1256,8 +1256,46 @@ fm10k_link_update(struct rte_eth_dev *dev,
return 0;
}
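+/* With the rte_eth_xstat API split, names are reported here while
+ * values come from fm10k_xstats_get(); both arrays are matched by
+ * index, so the iteration order must mirror the value loops exactly.
+ */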
+static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+ unsigned i, q;
+ unsigned count = 0;
+
+ if (xstats_names != NULL) {
+ /* Note: limit checked in rte_eth_xstats_get_names() */
+
+ /* Global stats */
+ for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", fm10k_hw_stats_strings[count].name);
+ count++;
+ }
+
+ /* PF queue stats */
+ for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
+ for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", q,
+ fm10k_hw_stats_rx_q_strings[i].name);
+ count++;
+ }
+ for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", q,
+ fm10k_hw_stats_tx_q_strings[i].name);
+ count++;
+ }
+ }
+ }
+ return FM10K_NB_XSTATS;
+}
+
static int
-fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct fm10k_hw_stats *hw_stats =
@@ -1269,8 +1307,6 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* Global stats */
for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "%s", fm10k_hw_stats_strings[count].name);
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
fm10k_hw_stats_strings[count].offset);
count++;
@@ -1279,18 +1315,12 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* PF queue stats */
for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_q%u_%s", q,
- fm10k_hw_stats_rx_q_strings[i].name);
xstats[count].value =
*(uint64_t *)(((char *)&hw_stats->q[q]) +
fm10k_hw_stats_rx_q_strings[i].offset);
count++;
}
for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_q%u_%s", q,
- fm10k_hw_stats_tx_q_strings[i].name);
xstats[count].value =
*(uint64_t *)(((char *)&hw_stats->q[q]) +
fm10k_hw_stats_tx_q_strings[i].offset);
@@ -2629,6 +2659,7 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
.allmulticast_disable = fm10k_dev_allmulticast_disable,
.stats_get = fm10k_stats_get,
.xstats_get = fm10k_xstats_get,
+ .xstats_get_names = fm10k_xstats_get_names,
.stats_reset = fm10k_stats_reset,
.xstats_reset = fm10k_stats_reset,
.link_update = fm10k_link_update,
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 81ed4e79..dd92a91e 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -96,22 +96,6 @@ rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
m->ol_flags |= PKT_RX_RSS_HASH;
-
- if (unlikely((d->d.staterr &
- (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
- (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
- m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
- if (unlikely((d->d.staterr &
- (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
- (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
- m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-
- if (unlikely(d->d.staterr & FM10K_RXD_STATUS_HBO))
- m->ol_flags |= PKT_RX_HBUF_OVERFLOW;
-
- if (unlikely(d->d.staterr & FM10K_RXD_STATUS_RXE))
- m->ol_flags |= PKT_RX_RECIP_ERR;
}
uint16_t
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index f8efe8f5..9ea747e1 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -101,7 +101,7 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, PKT_RX_RECIP_ERR, 0);
+ 0, 0, 0, 0);
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
@@ -487,10 +487,10 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
if (split_packet) {
- rte_prefetch0(&rx_pkts[pos]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
}
/* D.1 pkt 3,4 convert format from desc to pktmbuf */
@@ -606,8 +606,11 @@ fm10k_reassemble_packets(struct fm10k_rx_queue *rxq,
if (!split_flags[buf_idx]) {
/* it's the last packet of the set */
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
start->hash = end->hash;
start->ol_flags = end->ol_flags;
+ start->packet_type = end->packet_type;
+#endif
pkts[pkt_idx++] = start;
start = end = NULL;
}
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 6dd6eaab..53fe145f 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -48,9 +48,9 @@ LIBABIVER := 1
# Add extra flags for base driver files (also known as shared code)
# to disable warnings
#
-ifeq ($(CC), icc)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
CFLAGS_BASE_DRIVER = -wd593 -wd188
-else ifeq ($(CC), clang)
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
CFLAGS_BASE_DRIVER += -Wno-sign-compare
CFLAGS_BASE_DRIVER += -Wno-unused-value
CFLAGS_BASE_DRIVER += -Wno-unused-parameter
@@ -85,7 +85,7 @@ VPATH += $(SRCDIR)/base
#
# all source are stored in SRCS-y
-# base driver is based on the package of dpdk-i40e.2016.01.07.14.tar.gz.
+# base driver is based on the dpdk-i40e.2016.04.18.12.tar.gz package.
#
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_adminq.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_common.c
@@ -102,9 +102,16 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+# vector PMD driver needs SSE4.1 support
+ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
+CFLAGS_i40e_rxtx_vec.o += -msse4.1
+endif
+
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 222add40..0d3a83fa 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -37,18 +37,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#ifdef PF_DRIVER
-/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
- return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
- desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
-}
-
-#endif /* PF_DRIVER */
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -584,6 +572,26 @@ shutdown_arq_out:
i40e_release_spinlock(&hw->aq.arq_spinlock);
return ret_code;
}
+#ifdef PF_DRIVER
+
+/**
+ * i40e_resume_aq - resume AQ processing from 0
+ * @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
+#endif /* PF_DRIVER */
/**
* i40e_init_adminq - main initialization routine for Admin Queue
@@ -598,12 +606,15 @@ shutdown_arq_out:
**/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
- enum i40e_status_code ret_code;
#ifdef PF_DRIVER
- u16 eetrack_lo, eetrack_hi;
u16 cfg_ptr, oem_hi, oem_lo;
+ u16 eetrack_lo, eetrack_hi;
+#endif
+ enum i40e_status_code ret_code;
+#ifdef PF_DRIVER
int retry = 0;
#endif
+
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
@@ -612,8 +623,6 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
-
- /* initialize spin locks */
i40e_init_spinlock(&hw->aq.asq_spinlock);
i40e_init_spinlock(&hw->aq.arq_spinlock);
@@ -680,13 +689,9 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_release_on_done = false;
+ hw->nvm_release_on_done = false;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- ret_code = i40e_aq_set_hmc_resource_profile(hw,
- I40E_HMC_PROFILE_DEFAULT,
- 0,
- NULL);
#endif /* PF_DRIVER */
ret_code = I40E_SUCCESS;
@@ -720,8 +725,6 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
-
- /* destroy the spinlocks */
i40e_destroy_spinlock(&hw->aq.asq_spinlock);
i40e_destroy_spinlock(&hw->aq.arq_spinlock);
@@ -747,7 +750,6 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
-
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
@@ -780,7 +782,11 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
+#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw)
+#else
+STATIC bool i40e_asq_done(struct i40e_hw *hw)
+#endif
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
@@ -938,7 +944,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- /* ugh! delay while spin_lock */
i40e_msec_delay(1);
total_delay++;
} while (total_delay < hw->aq.asq_cmd_timeout);
@@ -1120,27 +1125,8 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_use = ntu;
#ifdef PF_DRIVER
- if (i40e_is_nvm_update_op(&e->desc)) {
- if (hw->aq.nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->aq.nvm_release_on_done = false;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
- }
-
-#endif
+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
+#endif /* PF_DRIVER */
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
@@ -1151,16 +1137,3 @@ clean_arq_element_err:
return ret_code;
}
-void i40e_resume_aq(struct i40e_hw *hw)
-{
- /* Registers are reset after PF reset */
- hw->aq.asq.next_to_use = 0;
- hw->aq.asq.next_to_clean = 0;
-
- i40e_config_asq_regs(hw);
-
- hw->aq.arq.next_to_use = 0;
- hw->aq.arq.next_to_clean = 0;
-
- i40e_config_arq_regs(hw);
-}
diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h
index 40c86d9d..750973c5 100644
--- a/drivers/net/i40e/base/i40e_adminq.h
+++ b/drivers/net/i40e/base/i40e_adminq.h
@@ -104,7 +104,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
@@ -158,8 +157,8 @@ STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
}
/* general information */
-#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
#ifdef I40E_ESS_SUPPORT
#define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */
#endif
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index fe9d5b51..2b7a7608 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -224,10 +224,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -450,6 +446,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -931,7 +928,7 @@ struct i40e_aqc_vsi_properties_data {
u8 up_enable_bits;
u8 sched_reserved;
/* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress table */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
__le16 qs_handle[8];
@@ -1571,7 +1568,8 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 reserved1[28];
};
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
@@ -1646,27 +1644,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1702,6 +1679,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@@ -1918,7 +1899,10 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports needs both bits 4 and 5 */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
u8 reserved[15];
};
@@ -1966,7 +1950,7 @@ struct i40e_aqc_nvm_config_read {
#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
- __le16 element_id; /* Feature/field ID */
+ __le16 element_id; /* Feature/field ID */
__le16 element_id_msw; /* MSWord of field ID */
__le32 address_high;
__le32 address_low;
@@ -1987,9 +1971,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2013,7 +1998,7 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
/* OEM Post Update (indirect 0x0720)
* no command data struct used
*/
- struct i40e_aqc_nvm_oem_post_update {
+struct i40e_aqc_nvm_oem_post_update {
#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
u8 sel_data;
u8 reserved[7];
@@ -2303,7 +2288,8 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp);
*/
struct i40e_aqc_lldp_stop_start_specific_agent {
#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
-#define I40E_AQC_START_SPECIFIC_AGENT_MASK (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
@@ -2325,7 +2311,7 @@ struct i40e_aqc_add_udp_tunnel {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
+ __le16 udp_port;
u8 filter_entry_index;
u8 multiple_pfs;
#define I40E_AQC_SINGLE_PF 0x0
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index ef3425e1..98ed4b68 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -67,6 +67,8 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
#ifdef X722_SUPPORT
@@ -78,6 +80,8 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
#endif
@@ -371,14 +375,16 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
/* the most we could have left is 16 bytes, pad with zeros */
if (i < len) {
char d_buf[16];
- int j;
+ int j, i_sav;
+ i_sav = i;
memset(d_buf, 0, sizeof(d_buf));
for (j = 0; i < len; j++, i++)
d_buf[j] = buf[i];
i40e_debug(hw, mask,
"\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ i_sav, d_buf[0], d_buf[1],
+ d_buf[2], d_buf[3],
d_buf[4], d_buf[5], d_buf[6], d_buf[7],
d_buf[8], d_buf[9], d_buf[10], d_buf[11],
d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
@@ -1317,8 +1323,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- /* It can take upto 15 secs for GRST steady state */
- grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */
+ grst_del = grst_del * 20;
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
@@ -2215,10 +2220,12 @@ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
* @seid: vsi number
* @set: set unicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide whether egress traffic gets mirrored in promiscuous mode
**/
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -2231,8 +2238,9 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
@@ -3040,10 +3048,7 @@ enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- if (!rule_id)
- return I40E_ERR_PARAM;
- } else {
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* count and mr_list shall be valid for rule_type INGRESS VLAN
* mirroring. For other rule_type, count and rule_type should
* not matter.
@@ -3240,67 +3245,6 @@ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_get_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * query the HMC profile of the device.
- **/
-enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile *profile,
- u8 *pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *resp =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- enum i40e_status_code status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_query_hmc_resource_profile);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
- I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
- *pe_vf_enabled_count = resp->pe_vf_enabled &
- I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
-
- return status;
-}
-
-/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *cmd =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- enum i40e_status_code status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_hmc_resource_profile);
-
- cmd->pm_profile = (u8)profile;
- cmd->pe_vf_enabled = pe_vf_enabled_count;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_request_resource
* @hw: pointer to the hw struct
* @resource: resource id
@@ -3728,7 +3672,7 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->num_msix_vectors = number;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: MSIX vector count = %d\n",
- p->num_msix_vectors_vf);
+ p->num_msix_vectors);
break;
case I40E_AQ_CAP_ID_VF_MSIX:
p->num_msix_vectors_vf = number;
@@ -3818,6 +3762,12 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
"HW Capability: wr_csr_prot = 0x%llX\n\n",
(p->wr_csr_prot & 0xffff));
break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = true;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = true;
+ break;
#ifdef X722_SUPPORT
case I40E_AQ_CAP_ID_WOL_AND_PROXY:
hw->num_wol_proxy_filters = (u16)number;
@@ -4486,7 +4436,7 @@ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
}
/**
- * i40_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
* @hw: pointer to the hw struct
* @flags: component flags
* @mac_seid: uplink seid (MAC SEID)
@@ -5484,6 +5434,35 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
}
/**
+ * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
+ * @filters: list of cloud filters
+ * @filter_count: length of list
+ *
+ * There's an issue in the device where the Geneve VNI layout needs
+ * to be shifted 1 byte over from the VxLAN VNI
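+ * (e.g. tenant_id 0x00000012 becomes 0x00001200 after the shift)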
+ **/
+STATIC void i40e_fix_up_geneve_vni(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
+ int i;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (le16_to_cpu(f[i].flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = le32_to_cpu(f[i].tenant_id);
+ f[i].tenant_id = cpu_to_le32(ti << 8);
+ }
+ }
+}
+
+/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
@@ -5503,8 +5482,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- u16 buff_len;
enum i40e_status_code status;
+ u16 buff_len;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
@@ -5515,6 +5494,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
@@ -5552,6 +5533,8 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index f8443405..ed73e1d2 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -49,6 +49,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
@@ -65,6 +67,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index f4e4eaa4..4fa1220b 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -872,10 +872,10 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
- hw->aq.nvm_release_on_done,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -889,7 +889,18 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* going into the state machine
*/
if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
bytes[0] = hw->nvmupd_state;
+
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
return I40E_SUCCESS;
}
@@ -908,6 +919,14 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
case I40E_NVMUPD_STATE_INIT_WAIT:
case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ return I40E_SUCCESS;
+ }
+
status = I40E_ERR_NOT_READY;
*perrno = -EBUSY;
break;
@@ -980,7 +999,8 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -996,7 +1016,8 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -1009,10 +1030,12 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (status)
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
}
break;
@@ -1030,7 +1053,8 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
-EIO;
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -1125,8 +1149,10 @@ retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (!status)
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
break;
case I40E_NVMUPD_WRITE_LCB:
@@ -1138,7 +1164,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1153,6 +1180,7 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
@@ -1167,7 +1195,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1217,6 +1246,37 @@ retry:
}
/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1378,6 +1438,12 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
return status;
}
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index 8c84ed82..38e7ba5b 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -204,6 +204,13 @@ struct i40e_virt_mem {
#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
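+/* Lower-case aliases so base-driver code written against the Linux
+ * kernel byte-order helpers builds unchanged on the rte_ wrappers.
+ */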
+#define cpu_to_le16(o) rte_cpu_to_le_16(o)
+#define cpu_to_le32(s) rte_cpu_to_le_32(s)
+#define cpu_to_le64(h) rte_cpu_to_le_64(h)
+#define le16_to_cpu(a) rte_le_to_cpu_16(a)
+#define le32_to_cpu(c) rte_le_to_cpu_32(c)
+#define le64_to_cpu(k) rte_le_to_cpu_64(k)
+
/* SW spinlock */
struct i40e_spinlock {
rte_spinlock_t spinlock;
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 674430de..03dda937 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -67,14 +67,15 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
+#ifdef VF_DRIVER
bool i40e_asq_done(struct i40e_hw *hw);
+#endif
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
#ifdef X722_SUPPORT
@@ -165,7 +166,8 @@ enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
@@ -345,10 +347,6 @@ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
u8 tcmap, bool request, u8 *tcmap_ret,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile *profile,
- u8 *pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
@@ -359,10 +357,6 @@ enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
@@ -411,7 +405,6 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
-
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
@@ -473,6 +466,7 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
#endif /* PF_DRIVER */
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index d5ca67af..5349419f 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -188,7 +188,7 @@ enum i40e_memcpy_type {
};
#ifdef X722_SUPPORT
-#define I40E_FW_API_VERSION_MINOR_X722 0x0004
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
#endif
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
@@ -376,6 +376,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -655,6 +660,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1631,4 +1638,37 @@ struct i40e_lldp_variables {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h
index 26208f3f..fd51ec32 100644
--- a/drivers/net/i40e/base/i40e_virtchnl.h
+++ b/drivers/net/i40e/base/i40e_virtchnl.h
@@ -87,10 +87,15 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
#ifdef I40E_SOL_VF_SUPPORT
I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19,
#endif
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -164,6 +169,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -172,8 +178,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -349,6 +355,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the rss fields in
+ * the vf resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bc28d3c3..f414d938 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,9 @@
#include "i40e_pf.h"
#include "i40e_regs.h"
+#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
+#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
+
#define I40E_CLEAR_PXE_WAIT_MS 200
/* Maximum number of capability elements */
@@ -305,7 +308,10 @@ static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
+static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
uint16_t queue_id,
@@ -447,6 +453,8 @@ static int i40e_get_eeprom(struct rte_eth_dev *dev,
static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
+static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
@@ -467,6 +475,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.link_update = i40e_dev_link_update,
.stats_get = i40e_dev_stats_get,
.xstats_get = i40e_dev_xstats_get,
+ .xstats_get_names = i40e_dev_xstats_get_names,
.stats_reset = i40e_dev_stats_reset,
.xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
@@ -520,6 +529,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.get_eeprom_length = i40e_get_eeprom_length,
.get_eeprom = i40e_get_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
+ .mtu_set = i40e_dev_mtu_set,
};
/* store statistics names and their offsets in the stats structure */
@@ -751,6 +761,161 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
}
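+/* Parses the floating_veb_list devargs value: entries are separated
+ * by ';' and '-' marks an inclusive range, so e.g. "0;3-5;8" selects
+ * VFs 0, 3, 4, 5 and 8 for the floating VEB.
+ */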
static int
+floating_veb_list_handler(__rte_unused const char *key,
+ const char *floating_veb_value,
+ void *opaque)
+{
+ int idx = 0;
+ unsigned int count = 0;
+ char *end = NULL;
+ int min, max;
+ bool *vf_floating_veb = opaque;
+
+ while (isblank(*floating_veb_value))
+ floating_veb_value++;
+
+ /* Reset floating VEB configuration for VFs */
+ for (idx = 0; idx < I40E_MAX_VF; idx++)
+ vf_floating_veb[idx] = false;
+
+ min = I40E_MAX_VF;
+ do {
+ while (isblank(*floating_veb_value))
+ floating_veb_value++;
+ if (*floating_veb_value == '\0')
+ return -1;
+ errno = 0;
+ idx = strtoul(floating_veb_value, &end, 10);
+ if (errno || end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ';') || (*end == '\0')) {
+ max = idx;
+ if (min == I40E_MAX_VF)
+ min = idx;
+ if (max >= I40E_MAX_VF)
+ max = I40E_MAX_VF - 1;
+ for (idx = min; idx <= max; idx++) {
+ vf_floating_veb[idx] = true;
+ count++;
+ }
+ min = I40E_MAX_VF;
+ } else {
+ return -1;
+ }
+ floating_veb_value = end + 1;
+ } while (*end != '\0');
+
+ if (count == 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+config_vf_floating_veb(struct rte_devargs *devargs,
+ uint16_t floating_veb,
+ bool *vf_floating_veb)
+{
+ struct rte_kvargs *kvlist;
+ int i;
+ const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
+
+ if (!floating_veb)
+ return;
+ /* All the VFs attach to the floating VEB by default
+ * when the floating VEB is enabled.
+ */
+ for (i = 0; i < I40E_MAX_VF; i++)
+ vf_floating_veb[i] = true;
+
+ if (devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return;
+
+ if (!rte_kvargs_count(kvlist, floating_veb_list)) {
+ rte_kvargs_free(kvlist);
+ return;
+ }
+ /* When the floating_veb_list parameter exists, all VFs first
+ * attach to the legacy VEB and are then moved to the floating
+ * VEB according to floating_veb_list.
+ */
+ if (rte_kvargs_process(kvlist, floating_veb_list,
+ floating_veb_list_handler,
+ vf_floating_veb) < 0) {
+ rte_kvargs_free(kvlist);
+ return;
+ }
+ rte_kvargs_free(kvlist);
+}
+
+static int
+i40e_check_floating_handler(__rte_unused const char *key,
+ const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+is_floating_veb_supported(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, floating_veb_key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ /* Floating VEB is enabled when there's key-value:
+ * enable_floating_veb=1
+ */
+ if (rte_kvargs_process(kvlist, floating_veb_key,
+ i40e_check_floating_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static void
+config_floating_veb(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
+
+ if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
+ pf->floating_veb = is_floating_veb_supported(pci_dev->devargs);
+ config_vf_floating_veb(pci_dev->devargs, pf->floating_veb,
+ pf->floating_veb_list);
+ } else {
+ pf->floating_veb = false;
+ }
+}
+
+static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
@@ -843,6 +1008,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
((hw->nvm.version >> 4) & 0xff),
(hw->nvm.version & 0xf), hw->nvm.eetrack);
+ /* Need the special FW version to support floating VEB */
+ config_floating_veb(dev);
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
@@ -920,12 +1087,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
"VLAN ether type");
goto err_setup_pf_switch;
}
- ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, ETHER_TYPE_VLAN);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to set the default outer "
- "VLAN ether type");
- goto err_setup_pf_switch;
- }
/* PF setup, which includes VSI setup */
ret = i40e_pf_setup(pf);
@@ -934,6 +1095,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_setup_pf_switch;
}
+ /* reset all stats of the device, including pf and main vsi */
+ i40e_dev_stats_reset(dev);
+
vsi = pf->main_vsi;
/* Disable double vlan by default */
@@ -1400,15 +1564,58 @@ i40e_parse_link_speeds(uint16_t link_speeds)
}
static int
-i40e_phy_conf_link(__rte_unused struct i40e_hw *hw,
- __rte_unused uint8_t abilities,
- __rte_unused uint8_t force_speed)
-{
- /* Skip any phy config on both 10G and 40G interfaces, as a workaround
- * for the link control limitation of that all link control should be
- * handled by firmware. It should follow up if link control will be
- * opened to software driver in future firmware versions.
- */
+i40e_phy_conf_link(struct i40e_hw *hw,
+ uint8_t abilities,
+ uint8_t force_speed)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp phy_ab;
+ struct i40e_aq_set_phy_config phy_conf;
+ const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
+ I40E_AQ_PHY_FLAG_PAUSE_RX |
+ I40E_AQ_PHY_FLAG_LOW_POWER;
+ const uint8_t advt = I40E_LINK_SPEED_40GB |
+ I40E_LINK_SPEED_10GB |
+ I40E_LINK_SPEED_1GB |
+ I40E_LINK_SPEED_100MB;
+ int ret = -ENOTSUP;
+
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
+ NULL);
+ if (status)
+ return ret;
+
+ memset(&phy_conf, 0, sizeof(phy_conf));
+
+ /* bits 0-2 use the values from get_phy_abilities_resp */
+ abilities &= ~mask;
+ abilities |= phy_ab.abilities & mask;
+
+ /* update abilities and speed */
+ if (abilities & I40E_AQ_PHY_AN_ENABLED)
+ phy_conf.link_speed = advt;
+ else
+ phy_conf.link_speed = force_speed;
+
+ phy_conf.abilities = abilities;
+
+ /* use get_phy_abilities_resp value for the rest */
+ phy_conf.phy_type = phy_ab.phy_type;
+ phy_conf.eee_capability = phy_ab.eee_capability;
+ phy_conf.eeer = phy_ab.eeer_val;
+ phy_conf.low_power_ctrl = phy_ab.d3_lpan;
+
+ PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
+ phy_ab.abilities, phy_ab.link_speed);
+ PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
+ phy_conf.abilities, phy_conf.link_speed);
+
+ status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
+ if (status)
+ return ret;
+
return I40E_SUCCESS;
}
@@ -1424,8 +1631,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
abilities |= I40E_AQ_PHY_AN_ENABLED;
- else
- abilities |= I40E_AQ_PHY_LINK_ENABLED;
+ abilities |= I40E_AQ_PHY_LINK_ENABLED;
+
+ /* Skip changing speed on 40G interfaces, FW does not support */
+ if (i40e_is_40G_device(hw->device_id)) {
+ speed = I40E_LINK_SPEED_UNKNOWN;
+ abilities |= I40E_AQ_PHY_AN_ENABLED;
+ }
return i40e_phy_conf_link(hw, abilities, speed);
}
@@ -1661,7 +1873,7 @@ i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
int status;
status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
- true, NULL);
+ true, NULL, true);
if (status != I40E_SUCCESS)
PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
@@ -1681,7 +1893,7 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
int status;
status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
- false, NULL);
+ false, NULL, true);
if (status != I40E_SUCCESS)
PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
@@ -1735,7 +1947,7 @@ i40e_dev_set_link_up(struct rte_eth_dev *dev)
* Set device link down.
*/
static int
-i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
+i40e_dev_set_link_down(struct rte_eth_dev *dev)
{
uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
@@ -2099,7 +2311,6 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->obytes = ns->eth.tx_bytes;
stats->oerrors = ns->eth.tx_errors +
pf->main_vsi->eth_stats.tx_errors;
- stats->imcasts = pf->main_vsi->eth_stats.rx_multicast;
/* Rx Errors */
stats->imissed = ns->eth.rx_discards +
@@ -2203,8 +2414,58 @@ i40e_xstats_calc_num(void)
(I40E_NB_TXQ_PRIO_XSTATS * 8);
}
+static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned count = 0;
+ unsigned i, prio;
+
+ if (xstats_names == NULL)
+ return i40e_xstats_calc_num();
+
+ /* Note: limit checked in rte_eth_xstats_get_names() */
+
+ /* Get stats from i40e_eth_stats struct */
+ for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", rte_i40e_stats_strings[i].name);
+ count++;
+ }
+
+ /* Get individual stats from i40e_hw_port struct */
+ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", rte_i40e_hw_port_strings[i].name);
+ count++;
+ }
+
+ for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", prio,
+ rte_i40e_rxq_prio_strings[i].name);
+ count++;
+ }
+ }
+
+ for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
+ for (prio = 0; prio < 8; prio++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", prio,
+ rte_i40e_txq_prio_strings[i].name);
+ count++;
+ }
+ }
+ return count;
+}
+
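
The xstats_get_names callback above doubles as a count query: a NULL name array returns the total, and a second call fills the table. A hedged application-side sketch of that two-call pattern against the 16.07 ethdev API (port number and error handling are illustrative):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Print all extended stats of a port: count with a NULL array,
 * then fetch names and values with two more calls. */
static void dump_xstats(uint8_t port_id)
{
	int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *vals;

	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	vals = calloc(n, sizeof(*vals));
	if (names != NULL && vals != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, vals, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, vals[i].value);
	}
	free(names);
	free(vals);
}
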
static int
-i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -2225,8 +2486,6 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* Get stats from i40e_eth_stats struct */
for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "%s", rte_i40e_stats_strings[i].name);
xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
rte_i40e_stats_strings[i].offset);
count++;
@@ -2234,19 +2493,13 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "%s", rte_i40e_hw_port_strings[i].name);
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
- rte_i40e_hw_port_strings[i].offset);
+ rte_i40e_hw_port_strings[i].offset);
count++;
}
for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
for (prio = 0; prio < 8; prio++) {
- snprintf(xstats[count].name,
- sizeof(xstats[count].name),
- "rx_priority%u_%s", prio,
- rte_i40e_rxq_prio_strings[i].name);
xstats[count].value =
*(uint64_t *)(((char *)hw_stats) +
rte_i40e_rxq_prio_strings[i].offset +
@@ -2257,10 +2510,6 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
for (prio = 0; prio < 8; prio++) {
- snprintf(xstats[count].name,
- sizeof(xstats[count].name),
- "tx_priority%u_%s", prio,
- rte_i40e_txq_prio_strings[i].name);
xstats[count].value =
*(uint64_t *)(((char *)hw_stats) +
rte_i40e_txq_prio_strings[i].offset +
@@ -2390,13 +2639,24 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
uint64_t reg_r = 0, reg_w = 0;
uint16_t reg_id = 0;
int ret = 0;
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
switch (vlan_type) {
case ETH_VLAN_TYPE_OUTER:
- reg_id = 2;
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
break;
case ETH_VLAN_TYPE_INNER:
- reg_id = 3;
+ if (qinq)
+ reg_id = 3;
+ else {
+ ret = -EINVAL;
+ PMD_DRV_LOG(ERR,
+ "Unsupported vlan type in single vlan.\n");
+ return ret;
+ }
break;
default:
ret = -EINVAL;
@@ -2458,8 +2718,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
i40e_vsi_config_double_vlan(vsi, TRUE);
+ /* Set global registers with default ether type value */
+ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+ ETHER_TYPE_VLAN);
+ i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
+ ETHER_TYPE_VLAN);
+ }
else
i40e_vsi_config_double_vlan(vsi, FALSE);
}
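
With this change, enabling extended (QinQ) VLAN offload also writes the default 0x8100 ether type into both TPID registers. A sketch of how an application might toggle the offload through the generic ethdev API (assuming an already initialized port; error handling trimmed):

#include <rte_ethdev.h>

/* Enable QinQ (double VLAN) reception on a port; underneath,
 * i40e_vlan_offload_set() now also programs the default TPIDs. */
static int enable_qinq(uint8_t port_id)
{
	int offloads = rte_eth_dev_get_vlan_offload(port_id);

	if (offloads < 0)
		return offloads;
	return rte_eth_dev_set_vlan_offload(port_id,
					    offloads | ETH_VLAN_EXTEND_OFFLOAD);
}
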
@@ -3716,21 +3982,27 @@ i40e_veb_release(struct i40e_veb *veb)
struct i40e_vsi *vsi;
struct i40e_hw *hw;
- if (veb == NULL || veb->associate_vsi == NULL)
+ if (veb == NULL)
return -EINVAL;
if (!TAILQ_EMPTY(&veb->head)) {
PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
return -EACCES;
}
+ /* associate_vsi field is NULL for floating VEB */
+ if (veb->associate_vsi != NULL) {
+ vsi = veb->associate_vsi;
+ hw = I40E_VSI_TO_HW(vsi);
- vsi = veb->associate_vsi;
- hw = I40E_VSI_TO_HW(vsi);
+ vsi->uplink_seid = veb->uplink_seid;
+ vsi->veb = NULL;
+ } else {
+ veb->associate_pf->main_vsi->floating_veb = NULL;
+ hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
+ }
- vsi->uplink_seid = veb->uplink_seid;
i40e_aq_delete_element(hw, veb->seid, NULL);
rte_free(veb);
- vsi->veb = NULL;
return I40E_SUCCESS;
}
@@ -3742,9 +4014,9 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
int ret;
struct i40e_hw *hw;
- if (NULL == pf || vsi == NULL) {
- PMD_DRV_LOG(ERR, "veb setup failed, "
- "associated VSI shouldn't null");
+ if (pf == NULL) {
+ PMD_DRV_LOG(ERR,
+ "veb setup failed, associated PF shouldn't null");
return NULL;
}
hw = I40E_PF_TO_HW(pf);
@@ -3756,11 +4028,19 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
}
veb->associate_vsi = vsi;
+ veb->associate_pf = pf;
TAILQ_INIT(&veb->head);
- veb->uplink_seid = vsi->uplink_seid;
+ veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
- ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
- I40E_DEFAULT_TCMAP, false, &veb->seid, false, NULL);
+ /* create floating veb if vsi is NULL */
+ if (vsi != NULL) {
+ ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
+ I40E_DEFAULT_TCMAP, false,
+ &veb->seid, false, NULL);
+ } else {
+ ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
+ true, &veb->seid, false, NULL);
+ }
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
@@ -3776,10 +4056,10 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
hw->aq.asq_last_status);
goto fail;
}
-
/* Get VEB bandwidth, to be implemented */
/* Now associated vsi binding to the VEB, set uplink to this VEB */
- vsi->uplink_seid = veb->seid;
+ if (vsi)
+ vsi->uplink_seid = veb->seid;
return veb;
fail:
@@ -3795,6 +4075,7 @@ i40e_vsi_release(struct i40e_vsi *vsi)
struct i40e_vsi_list *vsi_list;
int ret;
struct i40e_mac_filter *f;
+ uint16_t user_param = vsi->user_param;
if (!vsi)
return I40E_SUCCESS;
@@ -3812,12 +4093,22 @@ i40e_vsi_release(struct i40e_vsi *vsi)
i40e_veb_release(vsi->veb);
}
+ if (vsi->floating_veb) {
+ TAILQ_FOREACH(vsi_list, &vsi->floating_veb->head, list) {
+ if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
+ return -1;
+ TAILQ_REMOVE(&vsi->floating_veb->head, vsi_list, list);
+ }
+ }
+
/* Remove all macvlan filters of the VSI */
i40e_vsi_remove_all_macvlan_filter(vsi);
TAILQ_FOREACH(f, &vsi->mac_list, next)
rte_free(f);
- if (vsi->type != I40E_VSI_MAIN) {
+ if (vsi->type != I40E_VSI_MAIN &&
+ ((vsi->type != I40E_VSI_SRIOV) ||
+ !pf->floating_veb_list[user_param])) {
/* Remove vsi from parent's sibling list */
if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
@@ -3831,6 +4122,24 @@ i40e_vsi_release(struct i40e_vsi *vsi)
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(ERR, "Failed to delete element");
}
+
+ if ((vsi->type == I40E_VSI_SRIOV) &&
+ pf->floating_veb_list[user_param]) {
+ /* Remove vsi from parent's sibling list */
+ if (vsi->parent_vsi == NULL ||
+ vsi->parent_vsi->floating_veb == NULL) {
+ PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
+ return I40E_ERR_PARAM;
+ }
+ TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
+ &vsi->sib_vsi_list, list);
+
+ /* Remove all switch element of the VSI */
+ ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to delete element");
+ }
+
i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
if (vsi->type != I40E_VSI_SRIOV)
@@ -3999,7 +4308,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
struct ether_addr broadcast =
{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
- if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
+ if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
+ uplink_vsi == NULL) {
PMD_DRV_LOG(ERR, "VSI setup failed, "
"VSI link shouldn't be NULL");
return NULL;
@@ -4011,11 +4321,18 @@ i40e_vsi_setup(struct i40e_pf *pf,
return NULL;
}
- /* If uplink vsi didn't setup VEB, create one first */
- if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
+ /* Two situations:
+ * 1. type is not MAIN and uplink VSI is not NULL:
+ * if the uplink VSI has no VEB yet, create one under its veb field.
+ * 2. type is SRIOV and uplink VSI is NULL:
+ * if no floating VEB exists, create one under the main VSI's
+ * floating_veb field.
+ */
+
+ if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
+ uplink_vsi->veb == NULL) {
uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
- if (NULL == uplink_vsi->veb) {
+ if (uplink_vsi->veb == NULL) {
PMD_DRV_LOG(ERR, "VEB setup failed");
return NULL;
}
@@ -4023,6 +4340,16 @@ i40e_vsi_setup(struct i40e_pf *pf,
i40e_enable_pf_lb(pf);
}
+ if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
+ pf->main_vsi->floating_veb == NULL) {
+ pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
+
+ if (pf->main_vsi->floating_veb == NULL) {
+ PMD_DRV_LOG(ERR, "VEB setup failed");
+ return NULL;
+ }
+ }
+
vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
if (!vsi) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
@@ -4032,7 +4359,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
vsi->type = type;
vsi->adapter = I40E_PF_TO_ADAPTER(pf);
vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
- vsi->parent_vsi = uplink_vsi;
+ vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
vsi->user_param = user_param;
/* Allocate queues */
switch (vsi->type) {
@@ -4186,7 +4513,10 @@ i40e_vsi_setup(struct i40e_pf *pf,
* For other VSI, the uplink_seid equals to uplink VSI's
* uplink_seid since they share same VEB
*/
- vsi->uplink_seid = uplink_vsi->uplink_seid;
+ if (uplink_vsi == NULL)
+ vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
+ else
+ vsi->uplink_seid = uplink_vsi->uplink_seid;
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
ctxt.uplink_seid = vsi->uplink_seid;
@@ -4294,8 +4624,13 @@ i40e_vsi_setup(struct i40e_pf *pf,
vsi->seid = ctxt.seid;
vsi->vsi_id = ctxt.vsi_number;
vsi->sib_vsi_list.vsi = vsi;
- TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
- &vsi->sib_vsi_list, list);
+ if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
+ TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
+ &vsi->sib_vsi_list, list);
+ } else {
+ TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
+ &vsi->sib_vsi_list, list);
+ }
}
/* MAC/VLAN configuration */
@@ -9031,6 +9366,7 @@ static int i40e_get_regs(struct rte_eth_dev *dev,
arr_idx2++) {
reg_offset = arr_idx * reg_info->stride1 +
arr_idx2 * reg_info->stride2;
+ reg_offset += reg_info->base_addr;
ptr_data[reg_offset >> 2] =
i40e_read_rx_ctl(hw, reg_offset);
}
@@ -9046,6 +9382,7 @@ static int i40e_get_regs(struct rte_eth_dev *dev,
arr_idx2++) {
reg_offset = arr_idx * reg_info->stride1 +
arr_idx2 * reg_info->stride2;
+ reg_offset += reg_info->base_addr;
ptr_data[reg_offset >> 2] =
I40E_READ_REG(hw, reg_offset);
}
@@ -9104,3 +9441,34 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
/* Flags: 0x3 updates port address */
i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
}
+
+static int
+i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN
+ + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
+ int ret = 0;
+
+ /* check if mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ return -EINVAL;
+
+ /* mtu setting is forbidden if port is started */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR,
+ "port %d must be stopped before configuration\n",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev_data->dev_conf.rxmode.jumbo_frame = 0;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
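
As a worked example of the arithmetic above: MTU 9000 gives a frame size of 9000 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 9022 bytes, which is above ETHER_MAX_LEN, so jumbo_frame is switched on. A hedged usage sketch (the port must be stopped first, as the -EBUSY path enforces):

#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Request a jumbo MTU; the PMD derives max_rx_pkt_len as 9022. */
static int set_jumbo_mtu(uint8_t port_id)
{
	int ret = rte_eth_dev_set_mtu(port_id, 9000);

	if (ret == -EBUSY)
		printf("stop port %u before changing the MTU\n", port_id);
	return ret;
}
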
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index cfd23999..92c8fad0 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -36,6 +36,7 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
+#include <rte_kvargs.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -55,6 +56,8 @@
#define I40E_VFTA_SIZE (4096 / I40E_UINT32_BIT_SIZE)
/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX 64
+/* Maximum number of VFs */
+#define I40E_MAX_VF 128
/*
* vlan_id is a 12 bit number.
@@ -171,6 +174,10 @@ enum i40e_flxpld_layer_idx {
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+/* A special FW version is required to support the floating VEB feature */
+#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
+#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+
struct i40e_adapter;
/**
@@ -219,6 +226,7 @@ struct i40e_bw_info {
struct i40e_veb {
struct i40e_vsi_list_head head;
struct i40e_vsi *associate_vsi; /* Associate VSI who owns the VEB */
+ struct i40e_pf *associate_pf; /* Associated PF that owns the VEB */
uint16_t seid; /* The seid of VEB itself */
uint16_t uplink_seid; /* The uplink seid of this VEB */
uint16_t stats_idx;
@@ -259,6 +267,7 @@ struct i40e_vsi {
struct i40e_vsi_list sib_vsi_list; /* sibling vsi list */
struct i40e_vsi *parent_vsi;
struct i40e_veb *veb; /* Associated veb, could be null */
+ struct i40e_veb *floating_veb; /* Associated floating veb */
bool offset_loaded;
enum i40e_vsi_type type; /* VSI types */
uint16_t vlan_num; /* Total VLAN number */
@@ -450,6 +459,9 @@ struct i40e_pf {
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
+ bool floating_veb; /* The flag to use the floating VEB */
+ /* The floating enable flag for the specific VF */
+ bool floating_veb_list[I40E_MAX_VF];
};
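
The new floating_veb flags are presumably driven from device arguments (note the rte_kvargs.h include added above); the 16.07 i40e guide documents an enable_floating_veb devarg. A hypothetical sketch of parsing such a key with rte_kvargs — the handler name and semantics here are assumptions for illustration, not the PMD's actual code:

#include <stdbool.h>
#include <string.h>
#include <rte_common.h>
#include <rte_kvargs.h>

/* Hypothetical handler: flag is set when "enable_floating_veb=1". */
static int
floating_veb_handler(__rte_unused const char *key, const char *value,
		     void *opaque)
{
	bool *flag = opaque;

	*flag = (value != NULL && strcmp(value, "1") == 0);
	return 0;
}

static bool
parse_floating_veb(const char *devargs_str)
{
	bool enabled = false;
	struct rte_kvargs *kvlist = rte_kvargs_parse(devargs_str, NULL);

	if (kvlist == NULL)
		return false;
	rte_kvargs_process(kvlist, "enable_floating_veb",
			   floating_veb_handler, &enabled);
	rte_kvargs_free(kvlist);
	return enabled;
}
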
enum pending_msg {
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 2bce69b3..7b6df1d8 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -111,7 +111,10 @@ static int i40evf_dev_link_update(struct rte_eth_dev *dev,
static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
+static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
@@ -196,6 +199,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.link_update = i40evf_dev_link_update,
.stats_get = i40evf_dev_stats_get,
.xstats_get = i40evf_dev_xstats_get,
+ .xstats_get_names = i40evf_dev_xstats_get_names,
.xstats_reset = i40evf_dev_xstats_reset,
.dev_close = i40evf_dev_close,
.dev_infos_get = i40evf_dev_info_get,
@@ -214,6 +218,9 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
+ .rx_queue_count = i40e_dev_rx_queue_count,
+ .rxq_info_get = i40e_rxq_info_get,
+ .txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
.mac_addr_remove = i40evf_del_mac_addr,
.reta_update = i40evf_dev_rss_reta_update,
@@ -327,8 +334,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40evf_arq_msg_info info;
enum i40evf_aq_result ret;
- int err = -1;
- int i = 0;
+ int err, i = 0;
if (_atomic_set_cmd(vf, args->ops))
return -1;
@@ -349,19 +355,19 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
switch (args->ops) {
case I40E_VIRTCHNL_OP_RESET_VF:
/* no need to process in this function */
+ err = 0;
break;
case I40E_VIRTCHNL_OP_VERSION:
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
/* for init adminq commands, need to poll the response */
+ err = -1;
do {
ret = i40evf_read_pfmsg(dev, &info);
if (ret == I40EVF_MSG_CMD) {
err = 0;
break;
- } else if (ret == I40EVF_MSG_ERR) {
- err = -1;
+ } else if (ret == I40EVF_MSG_ERR)
break;
- }
rte_delay_ms(ASQ_DELAY_MS);
/* If no msg is read, or a sys event is read, continue */
} while (i++ < MAX_TRY_TIMES);
@@ -370,6 +376,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
default:
/* for other adminq commands at runtime, wait for the cmd done flag */
+ err = -1;
do {
if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) {
err = 0;
@@ -538,7 +545,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
struct vf_cmd_info args;
struct i40e_virtchnl_pvid_info tpid_info;
- if (dev == NULL || info == NULL) {
+ if (info == NULL) {
PMD_DRV_LOG(ERR, "invalid parameters");
return I40E_ERR_PARAM;
}
@@ -981,8 +988,23 @@ i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
}
+static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned i;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < I40EVF_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_i40evf_stats_strings[i].name);
+ }
+ return I40EVF_NB_XSTATS;
+}
+
static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n)
+ struct rte_eth_xstat *xstats, unsigned n)
{
int ret;
unsigned i;
@@ -1000,8 +1022,7 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
/* loop over xstats array and values from pstats */
for (i = 0; i < I40EVF_NB_XSTATS; i++) {
- snprintf(xstats[i].name, sizeof(xstats[i].name),
- "%s", rte_i40evf_stats_strings[i].name);
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)pstats) +
rte_i40evf_stats_strings[i].offset);
}
@@ -1567,6 +1588,8 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct i40e_vf *vf;
/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
* allocation or vector Rx preconditions we will reset it.
@@ -1576,6 +1599,19 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
+ /* For non-DPDK PF drivers, the VF has no way to disable HW
+ * CRC stripping; it is implicitly enabled by the PF.
+ */
+ if (!conf->rxmode.hw_strip_crc) {
+ vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+ (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) {
+ /* Peer is running non-DPDK PF driver. */
+ PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
+ return -EINVAL;
+ }
+ }
+
return i40evf_init_vlan(dev);
}
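
Because a kernel (non-DPDK) PF enables CRC stripping unconditionally, a VF configured with hw_strip_crc = 0 now fails early instead of silently diverging. A minimal port configuration sketch that satisfies the check (queue counts illustrative):

#include <rte_ethdev.h>

/* i40e VF under a non-DPDK PF: CRC strip must stay enabled,
 * otherwise dev_configure() returns -EINVAL. */
static int configure_vf_port(uint8_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.hw_strip_crc = 1, /* mandatory for non-DPDK PF */
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
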
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index ff57e8a2..f65c4110 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -94,7 +94,9 @@
I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
(((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
- ((((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << \
+ ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? \
+ NONUSE_FLX_PIT_DEST_OFF : \
+ ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \
I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
@@ -113,29 +115,11 @@
#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
-static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq);
-static int i40e_check_fdir_flex_conf(
- const struct rte_eth_fdir_flex_conf *conf);
-static void i40e_set_flx_pld_cfg(struct i40e_pf *pf,
- const struct rte_eth_flex_payload_cfg *cfg);
-static void i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
- enum i40e_filter_pctype pctype,
- const struct rte_eth_fdir_flex_mask *mask_cfg);
-static int i40e_fdir_construct_pkt(struct i40e_pf *pf,
- const struct rte_eth_fdir_input *fdir_input,
- unsigned char *raw_pkt);
-static int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *filter,
- bool add);
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
-static void i40e_fdir_info_get(struct rte_eth_dev *dev,
- struct rte_eth_fdir_info *fdir);
-static void i40e_fdir_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_fdir_stats *stat);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -163,7 +147,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 0;
rx_ctx.l2tsel = 1;
- rx_ctx.showiv = 1;
+ rx_ctx.showiv = 0;
rx_ctx.prefena = 1;
err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
@@ -1159,7 +1143,8 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
fdirdp->dtype_cmd_cntindex |=
rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
fdirdp->dtype_cmd_cntindex |=
- rte_cpu_to_le_32((pf->fdir.match_counter_index <<
+ rte_cpu_to_le_32(
+ ((uint32_t)pf->fdir.match_counter_index <<
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 5afd61a0..d5b2d450 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -123,7 +123,8 @@ int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
uint32_t val, i;
- struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ struct i40e_hw *hw;
+ struct i40e_pf *pf;
uint16_t vf_id, abs_vf_id, vf_msix_num;
int ret;
struct i40e_virtchnl_queue_select qsel;
@@ -131,6 +132,8 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
if (vf == NULL)
return -EINVAL;
+ pf = vf->pf;
+ hw = I40E_PF_TO_HW(vf->pf);
vf_id = vf->vf_idx;
abs_vf_id = vf_id + hw->func_caps.vf_base_id;
@@ -224,8 +227,14 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
I40E_WRITE_FLUSH(hw);
/* Allocate resource again */
- vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
- vf->pf->main_vsi, vf->vf_idx);
+ if (pf->floating_veb && pf->floating_veb_list[vf_id]) {
+ vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
+ NULL, vf->vf_idx);
+ } else {
+ vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
+ vf->pf->main_vsi, vf->vf_idx);
+ }
+
if (vf->vsi == NULL) {
PMD_DRV_LOG(ERR, "Add vsi failed");
return -EFAULT;
@@ -810,7 +819,7 @@ i40e_pf_host_process_cmd_config_promisc_mode(
if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC)
unicast = TRUE;
ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
- vf->vsi->seid, unicast, NULL);
+ vf->vsi->seid, unicast, NULL, true);
if (ret != I40E_SUCCESS)
goto send_msg;
@@ -913,7 +922,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
- if (!dev || vf_id > pf->vf_num - 1 || !pf->vfs) {
+ if (vf_id > pf->vf_num - 1 || !pf->vfs) {
PMD_DRV_LOG(ERR, "invalid argument");
return;
}
@@ -996,11 +1005,9 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
break;
- /* Don't add command supported below, which will
- * return an error code.
+ /* Commands not handled above are not supported and will
+ * return an error code.
*/
- case I40E_VIRTCHNL_OP_FCOE:
- PMD_DRV_LOG(ERR, "OP_FCOE received, not supported");
default:
PMD_DRV_LOG(ERR, "%u received, not supported", opcode);
i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM,
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 4d35d83f..d3cfb98f 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -88,7 +88,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
{
if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
- mb->ol_flags |= PKT_RX_VLAN_PKT;
+ mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
mb->vlan_tci =
rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
@@ -99,7 +99,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
- mb->ol_flags |= PKT_RX_QINQ_PKT;
+ mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
@@ -140,27 +140,12 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword)
#define I40E_RX_ERR_BITS 0x3f
if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
return flags;
- /* If RXE bit set, all other status bits are meaningless */
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- flags |= PKT_RX_MAC_ERR;
- return flags;
- }
-
- /* If RECIPE bit set, all other status indications should be ignored */
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) {
- flags |= PKT_RX_RECIP_ERR;
- return flags;
- }
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT)))
- flags |= PKT_RX_HBUF_OVERFLOW;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
flags |= PKT_RX_IP_CKSUM_BAD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
flags |= PKT_RX_L4_CKSUM_BAD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
flags |= PKT_RX_EIP_CKSUM_BAD;
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT)))
- flags |= PKT_RX_OVERSIZE;
return flags;
}
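
After this trimming only the checksum-related error flags survive, and stripped VLAN tags are signalled with the new *_STRIPPED flags. A small application-side sketch of consuming both:

#include <stdint.h>
#include <rte_mbuf.h>

/* Reject packets whose IP or L4 checksum failed HW validation. */
static int rx_pkt_ok(const struct rte_mbuf *m)
{
	return !(m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD));
}

/* Read the stripped VLAN tag only when the PMD marked it valid. */
static uint16_t rx_vlan_tag(const struct rte_mbuf *m)
{
	return (m->ol_flags & PKT_RX_VLAN_STRIPPED) ? m->vlan_tci : 0;
}
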
@@ -719,6 +704,33 @@ i40e_rxd_pkt_type_mapping(uint8_t ptype)
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_ICMP,
+ /* L2 NSH packet type */
+ [154] = RTE_PTYPE_L2_ETHER_NSH,
+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
/* All others reserved */
};
@@ -841,17 +853,6 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
}
}
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
-
- return m;
-}
-
/* Construct the tx flags */
static inline uint64_t
i40e_build_ctob(uint32_t td_cmd,
@@ -1225,7 +1226,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
- nmb = rte_rxmbuf_alloc(rxq->mp);
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb))
break;
rxd = *rxdp;
@@ -1336,7 +1337,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
- nmb = rte_rxmbuf_alloc(rxq->mp);
+ nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb))
break;
rxd = *rxdp;
@@ -2598,8 +2599,8 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
return;
}
- if (!rxq || !rxq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
return;
}
@@ -2774,7 +2775,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile union i40e_rx_desc *rxd;
- struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!mbuf)) {
PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
@@ -2904,7 +2905,12 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
rx_ctx.l2tsel = 1;
- rx_ctx.showiv = 1;
+ /* showiv indicates whether the inner VLAN is stripped inside a
+ * tunnel packet. When set to 1, VLAN information is stripped from
+ * the inner header, but the hardware does not put it in the
+ * descriptor, so it is set to zero by default.
+ */
+ rx_ctx.showiv = 0;
rx_ctx.prefena = 1;
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2981,13 +2987,15 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
struct i40e_tx_queue *txq;
const struct rte_memzone *tz = NULL;
uint32_t ring_size;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *dev;
if (!pf) {
PMD_DRV_LOG(ERR, "PF is not available");
return I40E_ERR_BAD_PTR;
}
+ dev = pf->adapter->eth_dev;
+
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("i40e fdir tx queue",
sizeof(struct i40e_tx_queue),
@@ -3035,13 +3043,15 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz = NULL;
uint32_t ring_size;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *dev;
if (!pf) {
PMD_DRV_LOG(ERR, "PF is not available");
return I40E_ERR_BAD_PTR;
}
+ dev = pf->adapter->eth_dev;
+
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("i40e fdir rx queue",
sizeof(struct i40e_rx_queue),
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index 047aff53..05cb415e 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -144,23 +144,24 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
uint64_t dword;
} vol;
- /* mask everything except rss and vlan flags
- *bit2 is for vlan tag, bits 13:12 for rss
- */
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
const __m128i rss_vlan_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
- 0x3004, 0x3004, 0x3004, 0x3004);
+ 0x3804, 0x3804, 0x3804, 0x3804);
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, PKT_RX_VLAN_PKT,
+ 0, 0, 0, PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
0, 0, 0, 0);
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, 0,
- PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0);
+ PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+ 0, 0, PKT_RX_FDIR, 0);
vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
@@ -169,7 +170,7 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
- rss = _mm_srli_epi16(vlan1, 12);
+ rss = _mm_srli_epi16(vlan1, 11);
rss = _mm_shuffle_epi8(rss_flags, rss);
vlan0 = _mm_or_si128(vlan0, rss);
@@ -184,41 +185,7 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
#endif
-#define PKTLEN_SHIFT (6)
-#define PKTLEN_MASK (0x3FFF)
-/* Handling the pkt len field is not aligned with 1byte, so shift is
- * needed to let it align
- */
-static inline void
-desc_pktlen_align(__m128i descs[4])
-{
- __m128i pktlen0, pktlen1, zero;
- union {
- uint16_t e[4];
- uint64_t dword;
- } vol;
-
- /* mask everything except pktlen field*/
- const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK,
- PKTLEN_MASK, PKTLEN_MASK);
-
- pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]);
- pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]);
- pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1);
-
- zero = _mm_xor_si128(pktlen0, pktlen0);
-
- pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT);
- pktlen0 = _mm_and_si128(pktlen0, pktlen_msk);
-
- pktlen0 = _mm_packs_epi32(pktlen0, zero);
- vol.dword = _mm_cvtsi128_si64(pktlen0);
- /* let the descriptor byte 15-14 store the pkt len */
- *((uint16_t *)&descs[0]+7) = vol.e[0];
- *((uint16_t *)&descs[1]+7) = vol.e[1];
- *((uint16_t *)&descs[2]+7) = vol.e[2];
- *((uint16_t *)&descs[3]+7) = vol.e[3];
-}
+#define PKTLEN_SHIFT 10
/*
* Notice:
@@ -331,18 +298,23 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
if (split_packet) {
- rte_prefetch0(&rx_pkts[pos]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
}
- /*shift the pktlen field*/
- desc_pktlen_align(descs);
-
/* avoid compiler reorder optimization */
rte_compiler_barrier();
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned */
+ const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
+ const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
+ descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
+
/* D.1 pkt 3,4 convert format from desc to pktmbuf */
pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
@@ -358,6 +330,14 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned */
+ const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
+ const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
+ descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
+
/* D.1 pkt 1,2 convert format from desc to pktmbuf */
pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
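
The replacement for desc_pktlen_align() shifts each 32-bit lane left by PKTLEN_SHIFT so the 14-bit length field (at bit 6 of the descriptor's high dword) lands in the top 16-bit word, then blends only that word back. A standalone SSE4.1 demonstration of the trick with a fabricated descriptor value (compile with -msse4.1):

#include <stdint.h>
#include <stdio.h>
#include <smmintrin.h>	/* SSE4.1 for _mm_blend_epi16 */

int main(void)
{
	/* Fake descriptor qword1 in the high dword of a lane: packet
	 * length 1514 stored at bit offset 6 of that dword. */
	uint32_t hi_dword = 1514u << 6;
	__m128i desc = _mm_set_epi32(hi_dword, 0, 0, 0);
	uint16_t words[8];

	/* Shift the 32-bit lanes left by 10 so the 14-bit length lands
	 * in the top 16-bit word, then blend only word 7 back in. */
	__m128i len = _mm_slli_epi32(desc, 10);
	desc = _mm_blend_epi16(desc, len, 0x80);

	_mm_storeu_si128((__m128i *)words, desc);
	printf("aligned pktlen word: %u\n", words[7]);	/* prints 1514 */
	return 0;
}
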
@@ -751,6 +731,10 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ /* need SSE4.1 support */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ return -1;
+
#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
/* without rx ol_flags, no VP flag report */
if (rxmode->hw_vlan_strip != 0 ||
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 50bf51c9..a6c71f34 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -43,7 +43,7 @@ EXPORT_MAP := rte_pmd_ixgbe_version.map
LIBABIVER := 1
-ifeq ($(CC), icc)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
@@ -51,7 +51,7 @@ CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
CFLAGS_ixgbe_rxtx.o += -wd3656
-else ifeq ($(CC), clang)
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
# CFLAGS for clang
#
@@ -108,7 +108,11 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
-SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec.c
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
+else
+SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c
+endif
ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index caa26640..76e78051 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -34,7 +34,7 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2016.01.07 released by ND. The sub-directory of base/
+cid-10g-shared-code.2016.04.12 released by ND. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
index 9e65fffa..db808801 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -995,17 +995,20 @@ STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @vlan: VLAN id to write to VLAN filter
* @vind: VMDq output index that maps queue to VLAN id in VFTA
* @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ * @bypass_vlvf: boolean flag - unused
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on, bool bypass_vlvf)
{
u32 regindex;
u32 bitindex;
u32 bits;
u32 vftabyte;
+ UNREFERENCED_1PARAMETER(bypass_vlvf);
+
DEBUGFUNC("ixgbe_set_vfta_82598");
if (vlan > 4095)
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h
index 89dd11a5..0326e70b 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.h
+++ b/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -39,7 +39,8 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass);
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 154c1f10..5bc7c2b9 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -1176,11 +1176,14 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
- hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+ hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+ IXGBE_CLEAR_VMDQ_ALL);
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index cf1e5169..17868676 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -209,6 +209,8 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
@@ -1078,33 +1080,38 @@ s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
* ixgbe_set_vfta - Set VLAN filter table
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFTA
- * @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN
+ * @vlvf_bypass: boolean flag indicating updating the default pool is okay
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
+ bool vlvf_bypass)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
- vlan_on), IXGBE_NOT_IMPLEMENTED);
+ vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_set_vlvf - Set VLAN Pool Filter
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
- * @vfta_changed: pointer to boolean flag which indicates whether VFTA
- * should be changed
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_delta: pointer to the difference between the current value of VFTA
+ * and the desired value
+ * @vfta: the desired value of the VFTA
+ * @vlvf_bypass: boolean flag indicating updating the default pool is okay
*
* Turn on/off specified bit in VLVF table.
**/
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
- bool *vfta_changed)
+ u32 *vfta_delta, u32 vfta, bool vlvf_bypass)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
- vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
+ vlan_on, vfta_delta, vfta, vlvf_bypass),
+ IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -1637,6 +1644,20 @@ void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
hw->mac.ops.release_swfw_sync(hw, mask);
}
+/**
+ * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register.
+ * Regardless of whether it succeeds or not, it then releases the semaphore.
+ * This function is called to recover from catastrophic failures that
+ * may have left the semaphore locked.
+ **/
+void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw)
+{
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+}
void ixgbe_disable_rx(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
index ae26a6ac..3aad1da7 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.h
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -124,9 +124,10 @@ s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on, bool vlvf_bypass);
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed);
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
@@ -191,6 +192,7 @@ s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw);
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
@@ -215,5 +217,7 @@ s32 ixgbe_handle_lasi(struct ixgbe_hw *hw);
void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed);
void ixgbe_disable_rx(struct ixgbe_hw *hw);
void ixgbe_enable_rx(struct ixgbe_hw *hw);
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#endif /* _IXGBE_API_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index ec61408d..811875a4 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -135,6 +135,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
/* Flow Control */
mac->ops.fc_enable = ixgbe_fc_enable_generic;
mac->ops.setup_fc = ixgbe_setup_fc_generic;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg;
/* Link */
mac->ops.get_link_capabilities = NULL;
@@ -1020,24 +1021,33 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
* ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
* @hw: pointer to the HW structure
*
- * Determines the LAN function id by reading memory-mapped registers
- * and swaps the port value if requested.
+ * Determines the LAN function id by reading memory-mapped registers and swaps
+ * the port value if requested, and sets the MAC instance for devices
+ * that share CS4227.
**/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
struct ixgbe_bus_info *bus = &hw->bus;
u32 reg;
+ u16 ee_ctrl_4;
DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
- bus->lan_id = bus->func;
+ bus->lan_id = (u8)bus->func;
/* check for a port swap */
reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
+
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ }
}
/**
@@ -2397,10 +2407,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
hw->mac.addr[4], hw->mac.addr[5]);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
- /* clear VMDq pool/queue selection for RAR 0 */
- hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
}
+
+ /* clear VMDq pool/queue selection for RAR 0 */
+ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+
hw->addr_ctrl.overflow_promisc = 0;
hw->addr_ctrl.rar_used_count = 1;
@@ -2729,7 +2740,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
}
/* Negotiate the fc mode to use */
- ixgbe_fc_autoneg(hw);
+ hw->mac.ops.fc_autoneg(hw);
/* Disable any previous flow control settings */
mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
@@ -2839,7 +2850,7 @@ out:
* Find the intersection between advertised settings and link partner's
* advertised settings
**/
-STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg))) {
@@ -3799,68 +3810,65 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* return the VLVF index where this VLAN id should be placed
*
**/
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
- u32 bits = 0;
- u32 first_empty_slot = 0;
- s32 regindex;
+ s32 regindex, first_empty_slot;
+ u32 bits;
/* short cut the special case */
if (vlan == 0)
return 0;
- /*
- * Search for the vlan id in the VLVF entries. Save off the first empty
- * slot found along the way
- */
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ /* if vlvf_bypass is set we don't want to use an empty slot, we
+ * will simply bypass the VLVF if there are no entries present in the
+ * VLVF that contain our VLAN
+ */
+ first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
+
+ /* add VLAN enable bit for comparison */
+ vlan |= IXGBE_VLVF_VIEN;
+
+ /* Search for the vlan id in the VLVF entries. Save off the first empty
+ * slot found along the way.
+ *
+ * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
+ */
+ for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !(first_empty_slot))
+ if (bits == vlan)
+ return regindex;
+ if (!first_empty_slot && !bits)
first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
}
- /*
- * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
- * in the VLVF. Else use the first empty VLVF register for this
- * vlan id.
- */
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
- "No space in VLVF.\n");
- regindex = IXGBE_ERR_NO_SPACE;
- }
- }
+ /* If we are here then we didn't find the VLAN. Return first empty
+ * slot we found during our search, else error.
+ */
+ if (!first_empty_slot)
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
- return regindex;
+ return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
}
/**
* ixgbe_set_vfta_generic - Set VLAN filter table
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on, bool vlvf_bypass)
{
- s32 regindex;
- u32 bitindex;
- u32 vfta;
- u32 targetbit;
- s32 ret_val = IXGBE_SUCCESS;
- bool vfta_changed = false;
+ u32 regidx, vfta_delta, vfta;
+ s32 ret_val;
DEBUGFUNC("ixgbe_set_vfta_generic");
- if (vlan > 4095)
+ if (vlan > 4095 || vind > 63)
return IXGBE_ERR_PARAM;
/*
@@ -3875,33 +3883,32 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* bits[11-5]: which register
* bits[4-0]: which bit in the register
*/
- regindex = (vlan >> 5) & 0x7F;
- bitindex = vlan & 0x1F;
- targetbit = (1 << bitindex);
- vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-
- if (vlan_on) {
- if (!(vfta & targetbit)) {
- vfta |= targetbit;
- vfta_changed = true;
- }
- } else {
- if ((vfta & targetbit)) {
- vfta &= ~targetbit;
- vfta_changed = true;
- }
- }
+ regidx = vlan / 32;
+ vfta_delta = 1 << (vlan % 32);
+ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
+
+ /* vfta_delta represents the difference between the current value
+ * of vfta and the value we want in the register. Since the diff
+ * is an XOR mask we can just update the vfta using an XOR
+ */
+ vfta_delta &= vlan_on ? ~vfta : vfta;
+ vfta ^= vfta_delta;
/* Part 2
* Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
*/
- ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
- &vfta_changed);
- if (ret_val != IXGBE_SUCCESS)
+ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
+ vfta, vlvf_bypass);
+ if (ret_val != IXGBE_SUCCESS) {
+ if (vlvf_bypass)
+ goto vfta_update;
return ret_val;
+ }
- if (vfta_changed)
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+vfta_update:
+ /* Update VFTA now that we are ready for traffic */
+ if (vfta_delta)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
return IXGBE_SUCCESS;
}
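
The reworked VFTA update treats vfta_delta as an XOR mask: it stays non-zero only when the target bit actually has to flip, so the register write can be skipped otherwise. A small sketch of the indexing and delta math in isolation:

#include <stdint.h>
#include <stdio.h>

/* The VFTA is 128 x 32-bit registers covering 4096 VLAN ids:
 * register index = vlan / 32, bit = vlan % 32. */
static uint32_t vfta_apply(uint32_t vfta, uint32_t vlan, int vlan_on)
{
	uint32_t vfta_delta = 1u << (vlan % 32);

	vfta_delta &= vlan_on ? ~vfta : vfta;	/* zero if already correct */
	return vfta ^ vfta_delta;
}

int main(void)
{
	uint32_t reg = vfta_apply(0, 100, 1);

	printf("VFTA[%d] bit %d -> 0x%08x\n",
	       100 / 32, 100 % 32, (unsigned)reg);	/* VFTA[3] bit 4 */
	return 0;
}
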
@@ -3910,21 +3917,25 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* ixgbe_set_vlvf_generic - Set VLAN Pool Filter
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
- * @vind: VMDq output index that maps queue to VLAN id in VFVFB
- * @vlan_on: boolean flag to turn on/off VLAN in VFVF
- * @vfta_changed: pointer to boolean flag which indicates whether VFTA
- * should be changed
+ * @vind: VMDq output index that maps queue to VLAN id in VLVFB
+ * @vlan_on: boolean flag to turn on/off VLAN in VLVF
+ * @vfta_delta: pointer to the difference between the current value of VFTA
+ * and the desired value
+ * @vfta: the desired value of the VFTA
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
*
* Turn on/off specified bit in VLVF table.
**/
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed)
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass)
{
- u32 vt;
+ u32 bits;
+ s32 vlvf_index;
DEBUGFUNC("ixgbe_set_vlvf_generic");
- if (vlan > 4095)
+ if (vlan > 4095 || vind > 63)
return IXGBE_ERR_PARAM;
/* If VT Mode is set
@@ -3934,82 +3945,57 @@ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- if (vt & IXGBE_VT_CTL_VT_ENABLE) {
- s32 vlvf_index;
- u32 bits;
-
- vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
- if (vlvf_index < 0)
- return vlvf_index;
-
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2),
- bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits |= (1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1),
- bits);
- }
- } else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2),
- bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1));
- bits &= ~(1 << (vind - 32));
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((vlvf_index * 2) + 1),
- bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(vlvf_index * 2));
- }
- }
+ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
+ return IXGBE_SUCCESS;
+ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
+ if (vlvf_index < 0)
+ return vlvf_index;
- /*
- * If there are still bits set in the VLVFB registers
- * for the VLAN ID indicated we need to see if the
- * caller is requesting that we clear the VFTA entry bit.
- * If the caller has requested that we clear the VFTA
- * entry bit but there are still pools/VFs using this VLAN
- * ID entry then ignore the request. We're not worried
- * about the case where we're turning the VFTA VLAN ID
- * entry bit on, only when requested to turn it off as
- * there may be multiple pools and/or VFs using the
- * VLAN ID entry. In that case we cannot clear the
- * VFTA bit until all pools/VFs using that VLAN ID have also
- * been cleared. This will be indicated by "bits" being
- * zero.
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
+
+ /* set the pool bit */
+ bits |= 1 << (vind % 32);
+ if (vlan_on)
+ goto vlvf_update;
+
+ /* clear the pool bit */
+ bits ^= 1 << (vind % 32);
+
+ if (!bits &&
+ !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
+ /* Clear VFTA first, then disable VLVF. Otherwise
+ * we run the risk of stray packets leaking into
+ * the PF via the default pool
*/
- if (bits) {
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
- (IXGBE_VLVF_VIEN | vlan));
- if ((!vlan_on) && (vfta_changed != NULL)) {
- /* someone wants to clear the vfta entry
- * but some pools/VFs are still using it.
- * Ignore it. */
- *vfta_changed = false;
- }
- } else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ if (vfta_delta)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
+
+ /* disable VLVF and clear remaining bit from pool */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
+
+ return IXGBE_SUCCESS;
}
+ /* If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ *vfta_delta = 0;
+
+vlvf_update:
+ /* record pool change and enable VLAN ID if not already enabled */
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
return IXGBE_SUCCESS;
}
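The rewritten pool-bit handling above replaces the old vind < 32 / vind >= 32 branches with index arithmetic: each VLVF slot owns a pair of 32-bit VLVFB registers, so pool vind lives in register vlvf_index * 2 + vind / 32 at bit vind % 32, and the sibling register checked before disabling the entry is vlvf_index * 2 + 1 - vind / 32. A minimal standalone sketch of that mapping, with a plain array standing in for the real MMIO accessors:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the VLVFB registers (two per VLVF slot). */
static uint32_t vlvfb[128];

/* Mirror the driver's index math: set or clear one pool bit in a slot. */
static void set_pool_bit(int vlvf_index, unsigned int vind, int on)
{
	unsigned int reg = vlvf_index * 2 + vind / 32; /* half of the pair */
	uint32_t bit = 1u << (vind % 32);              /* bit in that half */

	if (on)
		vlvfb[reg] |= bit;
	else
		vlvfb[reg] &= ~bit;
}

int main(void)
{
	set_pool_bit(3, 40, 1);
	/* pool 40 of slot 3 -> register 3 * 2 + 40 / 32 = 7, bit 40 % 32 = 8 */
	printf("vlvfb[7] = 0x%08x\n", (unsigned int)vlvfb[7]);
	/* sibling register inspected before the VLVF entry is cleared */
	printf("sibling  = vlvfb[%d]\n", 3 * 2 + 1 - 40 / 32);
	return 0;
}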
@@ -4032,7 +4018,7 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
}
return IXGBE_SUCCESS;
@@ -4218,43 +4204,25 @@ out:
/**
* ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
* @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
*
**/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
- int j;
- int pf_target_reg = pf >> 3;
- int pf_target_shift = pf % 8;
- u32 pfvfspoof = 0;
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
- /*
- * PFVFSPOOF register array is size 8 with 8 bits assigned to
- * MAC anti-spoof enables in each register array element.
- */
- for (j = 0; j < pf_target_reg; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Do not set the bits assigned to the PF
- */
- pfvfspoof &= (1 << pf_target_shift) - 1;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * Remaining pools belong to the PF so they do not need to have
- * anti-spoofing enabled.
- */
- for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+ pfvfspoof |= (1 << vf_target_shift);
+ else
+ pfvfspoof &= ~(1 << vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
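The simplified helper above depends on the PFVFSPOOF layout noted in the removed comment: eight MAC anti-spoof enable bits per 32-bit register, so VF pool vf maps to register vf >> 3, bit vf % 8, and a single read-modify-write replaces the old sweep over the whole array. A standalone illustration of the addressing, with a hypothetical VF number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int vf = 21;                   /* hypothetical VF pool number        */
	int vf_target_reg = vf >> 3;   /* PFVFSPOOF register index: 21/8 = 2 */
	int vf_target_shift = vf % 8;  /* MAC anti-spoof enable bit: 5       */
	uint32_t pfvfspoof = 0;        /* pretend this was read from the reg */

	pfvfspoof |= 1u << vf_target_shift;    /* enable for this VF  */
	printf("PFVFSPOOF(%d) = 0x%08x\n", vf_target_reg,
	       (unsigned int)pfvfspoof);
	pfvfspoof &= ~(1u << vf_target_shift); /* disable for this VF */
	return 0;
}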
/**
@@ -4363,8 +4331,9 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* So we will leave this up to the caller to read back the data
* in these cases.
*
- * Communicates with the manageability block. On success return IXGBE_SUCCESS
- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ * Communicates with the manageability block. On success returns IXGBE_SUCCESS;
+ * otherwise returns a semaphore error if the semaphore cannot be acquired,
+ * or IXGBE_ERR_HOST_INTERFACE_COMMAND if the command fails.
**/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data)
@@ -4373,6 +4342,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
u16 buf_len;
u16 dword_len;
+ s32 status;
DEBUGFUNC("ixgbe_host_interface_command");
@@ -4380,6 +4350,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ if (status)
+ return status;
+
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
@@ -4388,13 +4364,15 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
if ((hicr & IXGBE_HICR_EN) == 0) {
DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
if ((length % (sizeof(u32))) != 0) {
DEBUGOUT("Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto rel_out;
}
dword_len = length >> 2;
@@ -4421,11 +4399,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
ERROR_REPORT1(IXGBE_ERROR_CAUTION,
"Command has failed with no status valid.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
if (!return_data)
- return 0;
+ goto rel_out;
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
@@ -4439,11 +4418,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* If there is any thing in data position pull it in */
buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
if (buf_len == 0)
- return 0;
+ goto rel_out;
if (length < buf_len + hdr_size) {
DEBUGOUT("Buffer not large enough for reply message.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs, add 3 for odd lengths */
@@ -4455,7 +4435,10 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
IXGBE_LE32_TO_CPUS(&buffer[bi]);
}
- return 0;
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ return status;
}
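Taking the SW_MNG semaphore at the top of the function and funnelling every exit through the rel_out label is what lets ixgbe_set_fw_drv_ver_generic below drop its own acquire/release pair. The shape of the single-exit pattern, reduced to a standalone sketch with hypothetical lock helpers:

#include <stdio.h>

/* Hypothetical stand-ins for acquire_swfw_sync/release_swfw_sync. */
static int acquire_lock(void) { return 0; }
static void release_lock(void) { }
static int precondition(void) { return 1; }
static int run_command(void) { return 0; }

static int do_command(void)
{
	int status = acquire_lock();
	if (status)
		return status;     /* nothing held yet: plain return */

	if (!precondition()) {
		status = -1;       /* every later failure...         */
		goto rel_out;      /* ...must exit through the label */
	}

	status = run_command();

rel_out:
	release_lock();            /* released on every path         */
	return status;
}

int main(void)
{
	printf("status = %d\n", do_command());
	return 0;
}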
/**
@@ -4480,12 +4463,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
- != IXGBE_SUCCESS) {
- ret_val = IXGBE_ERR_SWFW_SYNC;
- goto out;
- }
-
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
@@ -4517,8 +4494,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
break;
}
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
-out:
return ret_val;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
index fd67a889..0545f85c 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -133,11 +133,12 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on, bool vlvf_bypass);
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool *vfta_changed);
+ bool vlan_on, u32 *vfta_delta, u32 vfta,
+ bool vlvf_bypass);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -147,7 +148,7 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
index 4a120a3d..d775142d 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -109,7 +109,9 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
/* mailbox API, version 1.2 VF requests */
-#define IXGBE_VF_UPDATE_XCAST_MODE 0x0C
+#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0C
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 40b0b512..31cc1bef 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -96,6 +96,7 @@ enum {
#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
+#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i)
#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i)
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 6ed685e8..ed1b14f3 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -454,6 +454,9 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case X557_PHY_ID:
phy_type = ixgbe_phy_x550em_ext_t;
break;
+ case IXGBE_M88E1500_E_PHY_ID:
+ phy_type = ixgbe_phy_m88;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -615,13 +618,12 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
DEBUGFUNC("ixgbe_read_phy_reg_generic");
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
- status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
- phy_data);
- hw->mac.ops.release_swfw_sync(hw, gssr);
- } else {
- status = IXGBE_ERR_SWFW_SYNC;
- }
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
return status;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h
index 1a5affe5..281f9faf 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.h
+++ b/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -89,8 +89,11 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_CS4227 0xBE /* CS4227 address */
#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
+#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
+#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 4dce2ac1..83818a96 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -133,12 +133,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
-#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15C6
-#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA
#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC
#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
@@ -194,7 +196,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_FLA_X540 IXGBE_FLA
#define IXGBE_FLA_X550 IXGBE_FLA
#define IXGBE_FLA_X550EM_x IXGBE_FLA
-#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA_X550EM_a 0x15F68
#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
@@ -1078,16 +1080,40 @@ struct ixgbe_dmac_config {
#define IXGBE_PCIEPIPEDAT 0x11008
#define IXGBE_GSCL_1 0x11010
#define IXGBE_GSCL_2 0x11014
+#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1
+#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2
#define IXGBE_GSCL_3 0x11018
#define IXGBE_GSCL_4 0x1101C
#define IXGBE_GSCN_0 0x11020
#define IXGBE_GSCN_1 0x11024
#define IXGBE_GSCN_2 0x11028
#define IXGBE_GSCN_3 0x1102C
+#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0
+#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1
+#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2
+#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3
#define IXGBE_FACTPS 0x10150
#define IXGBE_FACTPS_X540 IXGBE_FACTPS
+#define IXGBE_GSCL_1_X550 0x11800
+#define IXGBE_GSCL_2_X550 0x11804
+#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550
+#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550
+#define IXGBE_GSCN_0_X550 0x11820
+#define IXGBE_GSCN_1_X550 0x11824
+#define IXGBE_GSCN_2_X550 0x11828
+#define IXGBE_GSCN_3_X550 0x1182C
+#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550
+#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550
+#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550
+#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550
#define IXGBE_FACTPS_X550 IXGBE_FACTPS
#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS
+#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550
+#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550
+#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550
+#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550
+#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550
+#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550
#define IXGBE_FACTPS_X550EM_a 0x15FEC
#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS)
@@ -1124,6 +1150,10 @@ struct ixgbe_dmac_config {
#define IXGBE_GSCL_6_82599 0x11034
#define IXGBE_GSCL_7_82599 0x11038
#define IXGBE_GSCL_8_82599 0x1103C
+#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599
+#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599
+#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599
+#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599
#define IXGBE_PHYADR_82599 0x11040
#define IXGBE_PHYDAT_82599 0x11044
#define IXGBE_PHYCTL_82599 0x11048
@@ -1134,10 +1164,22 @@ struct ixgbe_dmac_config {
#define IXGBE_CIAD_82599 IXGBE_CIAD
#define IXGBE_CIAA_X540 IXGBE_CIAA
#define IXGBE_CIAD_X540 IXGBE_CIAD
+#define IXGBE_GSCL_5_X550 0x11810
+#define IXGBE_GSCL_6_X550 0x11814
+#define IXGBE_GSCL_7_X550 0x11818
+#define IXGBE_GSCL_8_X550 0x1181C
+#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550
#define IXGBE_CIAA_X550 0x11508
#define IXGBE_CIAD_X550 0x11510
#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550
#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550
+#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550
+#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550
+#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550
#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550
#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550
#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA)
@@ -1472,6 +1514,7 @@ struct ixgbe_dmac_config {
#define IXGBE_CORECTL_WRITE_CMD 0x00010000
/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
@@ -1606,7 +1649,8 @@ struct ixgbe_dmac_config {
#define ATH_PHY_ID 0x03429050
/* PHY Types */
-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
+#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0
+#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
@@ -2247,6 +2291,9 @@ enum {
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
@@ -2358,6 +2405,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7)
#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
#define IXGBE_FW_LESM_STATE_1 0x1
#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
@@ -2820,7 +2868,7 @@ enum {
#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P)))
#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P)))
#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P)))
-#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P)))
+#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P)))
#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
@@ -3003,6 +3051,12 @@ enum ixgbe_fdir_pballoc_type {
/* Host Interface Command Structures */
+#ifdef C99
+#pragma pack(push, 1)
+#else
+#pragma pack(1)
+#endif /* C99 */
+
struct ixgbe_hic_hdr {
u8 cmd;
u8 buf_len;
@@ -3080,17 +3134,22 @@ struct ixgbe_hic_internal_phy_req {
struct ixgbe_hic_hdr hdr;
u8 port_number;
u8 command_type;
- u16 address;
+ __be16 address;
u16 rsv1;
- u32 write_data;
+ __le32 write_data;
u16 pad;
};
struct ixgbe_hic_internal_phy_resp {
struct ixgbe_hic_hdr hdr;
- u32 read_data;
+ __le32 read_data;
};
+#ifdef C99
+#pragma pack(pop)
+#else
+#pragma pack()
+#endif /* C99 */
/* Transmit Descriptor - Legacy */
struct ixgbe_legacy_tx_desc {
@@ -3246,6 +3305,7 @@ typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
+#define IXGBE_LINK_SPEED_10_FULL 0x0004
#define IXGBE_LINK_SPEED_100_FULL 0x0008
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
@@ -3511,6 +3571,8 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_intel,
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
+ ixgbe_phy_sgmii,
+ ixgbe_phy_m88,
ixgbe_phy_generic
};
@@ -3567,6 +3629,14 @@ enum ixgbe_fc_mode {
ixgbe_fc_default
};
+/* Master/slave control */
+enum ixgbe_ms_type {
+ ixgbe_ms_hw_default = 0,
+ ixgbe_ms_force_master,
+ ixgbe_ms_force_slave,
+ ixgbe_ms_auto
+};
+
/* Smart Speed Settings */
#define IXGBE_SMARTSPEED_MAX_RETRIES 3
enum ixgbe_smart_speed {
@@ -3626,7 +3696,8 @@ struct ixgbe_bus_info {
enum ixgbe_bus_type type;
u16 func;
- u16 lan_id;
+ u8 lan_id;
+ u16 instance_id;
};
/* Flow control parameters */
@@ -3766,6 +3837,7 @@ struct ixgbe_mac_operations {
s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*init_swfw_sync)(struct ixgbe_hw *);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@@ -3805,8 +3877,9 @@ struct ixgbe_mac_operations {
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
- s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+ s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32,
+ bool);
s32 (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
@@ -3814,6 +3887,7 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
s32 (*setup_fc)(struct ixgbe_hw *);
+ void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3941,6 +4015,8 @@ struct ixgbe_phy_info {
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
ixgbe_link_speed speeds_supported;
+ enum ixgbe_ms_type ms_type;
+ enum ixgbe_ms_type original_ms_type;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -4057,8 +4133,12 @@ struct ixgbe_hw {
#define IXGBE_FUSES0_REV_MASK (3 << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
@@ -4072,18 +4152,29 @@ struct ixgbe_hw {
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
+#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
+#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10)
+#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11)
+
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19)
+
#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
@@ -4116,6 +4207,11 @@ struct ixgbe_hw {
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1 << 1)
+#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M (1 << 23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
index 40dc1c8c..a75074a5 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -362,8 +362,10 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+ return IXGBE_ERR_MBX;
+ }
return ret_val;
}
@@ -422,13 +424,15 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if true then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating whether updating the default pool is okay
**/
-s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 ret_val;
- UNREFERENCED_1PARAMETER(vind);
+ UNREFERENCED_2PARAMETER(vind, vlvf_bypass);
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h
index 411152a4..8851cb82 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.h
+++ b/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -31,8 +31,8 @@ POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
-#ifndef __IXGBE_VF_H__
-#define __IXGBE_VF_H__
+#ifndef _IXGBE_VF_H_
+#define _IXGBE_VF_H_
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
@@ -131,7 +131,8 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr,
bool clear);
-s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass);
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
index 9ade1b5e..31dead0d 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -99,6 +99,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
+ mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540;
mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
@@ -268,11 +269,14 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
- hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+ hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+ IXGBE_CLEAR_VMDQ_ALL);
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
@@ -943,6 +947,25 @@ STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_swfw_sync_X540 - Reset SW/FW semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function resets the hardware semaphore bits for a semaphore that may
+ * have been left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+ /* First try to grab the semaphore, but don't bother checking whether
+ * the lock was actually acquired, since the action is the same either
+ * way:
+ * We got the lock - we release it.
+ * We timed out trying to get the lock - we force its release.
+ */
+ ixgbe_get_swfw_sync_semaphore(hw);
+ ixgbe_release_swfw_sync_semaphore(hw);
+}
+
+/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h
index 42c08a82..e4baf6ff 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.h
+++ b/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -59,6 +59,7 @@ s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index 0bbaa55b..aa6e859f 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -39,8 +39,8 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_phy.h"
STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
-static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
-static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
+STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
+STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
/**
* ixgbe_init_ops_X550 - Inits func ptrs and MAC type
@@ -329,6 +329,137 @@ STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @dev_type: always unused
+ * @phy_data: Pointer to read data from PHY register
+ */
+STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 dev_type, u16 *phy_data)
+{
+ u32 i, data, command;
+ UNREFERENCED_1PARAMETER(dev_type);
+
+ /* Setup and write the read command */
+ command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+
+ if (command & IXGBE_MSCA_MDI_COMMAND) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY read command did not complete.\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /* Read operation is complete. Get the data from MSRWD */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)data;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @dev_type: always unused
+ * @phy_data: Data to write to the PHY register
+ */
+STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 dev_type, u16 phy_data)
+{
+ u32 i, command;
+ UNREFERENCED_1PARAMETER(dev_type);
+
+ /* Put the data in the MDI single read and write data register*/
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the write command */
+ command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+
+ if (command & IXGBE_MSCA_MDI_COMMAND) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_phy_1g - Get 1g PHY type based on device id
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+STATIC s32 ixgbe_identify_phy_1g(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u16 phy_id_high;
+ u16 phy_id_low;
+ s32 rc;
+
+ rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_MDIO_PHY_ID_HIGH, 0,
+ &phy_id_high);
+ if (rc)
+ goto rel_out;
+
+ rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_MDIO_PHY_ID_LOW, 0,
+ &phy_id_low);
+ if (rc)
+ goto rel_out;
+
+ hw->phy.id = (u32)phy_id_high << 16;
+ hw->phy.id |= phy_id_low & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = (u32)phy_id_low & ~IXGBE_PHY_REVISION_MASK;
+
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ return rc;
+}
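ixgbe_identify_phy_1g assembles the 32-bit PHY identifier the same way the generic identify routine does: the high ID register becomes bits 31:16 and the low ID register is split by IXGBE_PHY_REVISION_MASK into ID bits and a revision. A standalone illustration, assuming the driver's usual mask value of 0xFFFFFFF0 and using the M88E1500 ID added to ixgbe_type.h in this patch:

#include <stdint.h>
#include <stdio.h>

#define PHY_REVISION_MASK 0xFFFFFFF0u /* assumed IXGBE_PHY_REVISION_MASK */
#define M88E1500_E_PHY_ID 0x01410DD0u /* IXGBE_M88E1500_E_PHY_ID         */

int main(void)
{
	/* Hypothetical raw reads of the clause-22 ID registers (2 and 3). */
	uint16_t phy_id_high = 0x0141;
	uint16_t phy_id_low = 0x0DD3; /* ID bits 0x0DD0 plus revision 3 */

	uint32_t id = (uint32_t)phy_id_high << 16;
	id |= phy_id_low & PHY_REVISION_MASK;
	uint32_t rev = phy_id_low & ~PHY_REVISION_MASK;

	printf("phy id  = 0x%08x (M88E1500: %s)\n", (unsigned int)id,
	       id == M88E1500_E_PHY_ID ? "yes" : "no");
	printf("phy rev = %u\n", (unsigned int)rev);
	return 0;
}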
+
+/**
* ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -338,7 +469,8 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
- hw->phy.phy_semaphore_mask = IXGBE_GSSR_TOKEN_SM;
+ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
+ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
else
@@ -364,10 +496,17 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
+ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ return ixgbe_identify_phy_1g(hw);
default:
break;
}
@@ -397,7 +536,7 @@ STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -412,7 +551,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32
+STATIC s32
ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
@@ -428,7 +567,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -443,7 +582,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
*
* Returns an error code on error.
**/
-static s32
+STATIC s32
ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
@@ -512,8 +651,8 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
link->addr = IXGBE_CS4227;
}
if (hw->mac.type == ixgbe_mac_X550EM_a) {
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
}
@@ -527,12 +666,21 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
mac->ops.setup_fc = ixgbe_setup_fc_generic;
+ else if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ mac->ops.setup_fc = ixgbe_setup_fc_x550a;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_x550a;
+ }
else
mac->ops.setup_fc = ixgbe_setup_fc_X550em;
-
- if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR)
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ break;
+ default:
mac->ops.setup_eee = NULL;
+ }
/* PHY */
phy->ops.init = ixgbe_init_phy_ops_X550em;
@@ -717,6 +865,116 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_enable_eee_x550 - Enable EEE support
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_enable_eee_x550(struct ixgbe_hw *hw)
+{
+ u16 autoneg_eee_reg;
+ u32 link_reg;
+ s32 status;
+
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Advertise EEE capability */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_eee_reg);
+
+ autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_eee_reg);
+ return IXGBE_SUCCESS;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
+
+ /* Don't advertise FEC capability when EEE enabled. */
+ link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ break;
+ default:
+ break;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_disable_eee_x550 - Disable EEE support
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_disable_eee_x550(struct ixgbe_hw *hw)
+{
+ u16 autoneg_eee_reg;
+ u32 link_reg;
+ s32 status;
+
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Disable advertised EEE capability */
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_eee_reg);
+
+ autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
+ IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
+
+ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_eee_reg);
+ return IXGBE_SUCCESS;
+ }
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
+
+ /* Advertise FEC capability when EEE is disabled. */
+ link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ if (status != IXGBE_SUCCESS)
+ return status;
+ break;
+ default:
+ break;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_setup_eee_X550 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @enable_eee: boolean flag to enable EEE
@@ -728,10 +986,8 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
**/
s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
{
- u32 eeer;
- u16 autoneg_eee_reg;
- u32 link_reg;
s32 status;
+ u32 eeer;
DEBUGFUNC("ixgbe_setup_eee_X550");
@@ -740,75 +996,20 @@ s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
if (enable_eee) {
eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Advertise EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
-
- autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
- } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
- /* Not supported on first revision of X550EM_x. */
- if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
- !(IXGBE_FUSES0_REV_MASK &
- IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
- return IXGBE_SUCCESS;
-
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
-
- /* Don't advertise FEC capability when EEE enabled. */
- link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- }
+ /* Not supported on first revision of X550EM_x. */
+ if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
+ !(IXGBE_FUSES0_REV_MASK &
+ IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
+ return IXGBE_SUCCESS;
+ status = ixgbe_enable_eee_x550(hw);
+ if (status)
+ return status;
} else {
eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Disable advertised EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
-
- autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
- } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
-
- /* Advertise FEC capability when EEE is disabled. */
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- }
+ status = ixgbe_disable_eee_x550(hw);
+ if (status)
+ return status;
}
IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
@@ -1007,6 +1208,7 @@ s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
token_cmd.port_number = hw->bus.lan_id;
token_cmd.command_type = FW_PHY_TOKEN_REQ;
token_cmd.pad = 0;
@@ -1037,6 +1239,7 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
token_cmd.port_number = hw->bus.lan_id;
token_cmd.command_type = FW_PHY_TOKEN_REL;
token_cmd.pad = 0;
@@ -1048,6 +1251,8 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
return status;
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return IXGBE_SUCCESS;
+
+ DEBUGOUT("Put PHY Token host interface command failed");
return IXGBE_ERR_FW_RESP_INVALID;
}
@@ -1060,23 +1265,24 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
* @data: Data to write to the register
**/
s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data)
+ u32 device_type, u32 data)
{
struct ixgbe_hic_internal_phy_req write_cmd;
s32 status;
UNREFERENCED_1PARAMETER(device_type);
+ memset(&write_cmd, 0, sizeof(write_cmd));
write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
write_cmd.port_number = hw->bus.lan_id;
write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
- write_cmd.address = (u16)reg_addr;
- write_cmd.rsv1 = 0;
- write_cmd.write_data = data;
- write_cmd.pad = 0;
+ write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
+ write_cmd.write_data = IXGBE_CPU_TO_LE32(data);
status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
- sizeof(write_cmd), IXGBE_HI_COMMAND_TIMEOUT, false);
+ sizeof(write_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -1090,26 +1296,29 @@ s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* @data: Pointer to read data from the register
**/
s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data)
+ u32 device_type, u32 *data)
{
- struct ixgbe_hic_internal_phy_req read_cmd;
+ union {
+ struct ixgbe_hic_internal_phy_req cmd;
+ struct ixgbe_hic_internal_phy_resp rsp;
+ } hic;
s32 status;
UNREFERENCED_1PARAMETER(device_type);
- read_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
- read_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
- read_cmd.port_number = hw->bus.lan_id;
- read_cmd.command_type = FW_INT_PHY_REQ_READ;
- read_cmd.address = (u16)reg_addr;
- read_cmd.rsv1 = 0;
- read_cmd.write_data = 0;
- read_cmd.pad = 0;
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+ hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
- status = ixgbe_host_interface_command(hw, (u32 *)&read_cmd,
- sizeof(read_cmd), IXGBE_HI_COMMAND_TIMEOUT, true);
+ status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
/* Extract the register value from the response. */
- *data = ((struct ixgbe_hic_internal_phy_resp *)&read_cmd)->read_data;
+ *data = IXGBE_LE32_TO_CPU(hic.rsp.read_data);
return status;
}
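The corrected helpers above stop passing address and write_data in host byte order: per the __be16/__le32 annotations added to ixgbe_hic_internal_phy_req earlier in this patch, the firmware expects the register address big-endian and the data little-endian, hence IXGBE_CPU_TO_BE16/IXGBE_CPU_TO_LE32 (mapped to rte_cpu_to_be_16/rte_cpu_to_le_32 by ixgbe_osdep.h). A standalone sketch of the 16-bit conversion, with the helper written out by hand for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled stand-in for IXGBE_CPU_TO_BE16 / rte_cpu_to_be_16. */
static uint16_t cpu_to_be16(uint16_t v)
{
	union { uint16_t u; uint8_t b[2]; } probe = { 1 };

	if (probe.b[0])  /* little-endian host: swap the bytes  */
		return (uint16_t)((v << 8) | (v >> 8));
	return v;        /* big-endian host: already wire order */
}

int main(void)
{
	uint16_t reg_addr = 0x4B00; /* hypothetical IOSF register address */
	uint16_t wire = cpu_to_be16(reg_addr);
	uint8_t *b = (uint8_t *)&wire;

	/* On any host the bytes now sit in big-endian order: 4b 00 */
	printf("host 0x%04x -> wire bytes %02x %02x\n",
	       (unsigned int)reg_addr, b[0], b[1]);
	return 0;
}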
@@ -1286,10 +1495,18 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ media_type = ixgbe_media_type_copper;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ media_type = ixgbe_media_type_backplane;
+ hw->phy.type = ixgbe_phy_sgmii;
+ break;
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
- case IXGBE_DEV_ID_X550EM_A_10G_T:
media_type = ixgbe_media_type_copper;
+ hw->phy.type = ixgbe_phy_m88;
break;
default:
media_type = ixgbe_media_type_unknown;
@@ -1382,6 +1599,57 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_sgmii - Set up link for SGMII
+ * @hw: pointer to hardware structure
+ * @speed: unused
+ * @autoneg_wait_to_complete: unused
+ */
+STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval;
+ s32 rc;
+ UNREFERENCED_2PARAMETER(speed, autoneg_wait_to_complete);
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+
+ return rc;
+}
+
+/**
* ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
*/
@@ -1391,8 +1659,8 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
- switch (hw->mac.ops.get_media_type(hw)) {
- case ixgbe_media_type_fiber:
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
/* CS4227 does not support autoneg, so disable the laser control
* functions for SFP+ fiber
*/
@@ -1400,17 +1668,28 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
- mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
mac->ops.set_rate_select_speed =
ixgbe_set_soft_rate_select_speed;
+ if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
+ (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550a;
+ else
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550em;
break;
case ixgbe_media_type_copper:
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
mac->ops.check_link = ixgbe_check_link_t_X550em;
break;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+ mac->ops.setup_link = ixgbe_setup_sgmii;
+ break;
default:
break;
- }
+ }
}
/**
@@ -1447,8 +1726,19 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
- *speed = IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+ switch (hw->phy.type) {
+ case ixgbe_phy_m88:
+ *speed = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case ixgbe_phy_sgmii:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
*autoneg = true;
}
@@ -1644,9 +1934,9 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
return status;
@@ -1664,14 +1954,210 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
/* Restart auto-negotiation. */
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
}
/**
+ * ixgbe_set_master_slave_mode - Set up PHY for master/slave mode
+ * @hw: pointer to hardware structure
+ *
+ * Must be called while holding the PHY semaphore and token
+ */
+STATIC s32 ixgbe_set_master_slave_mode(struct ixgbe_hw *hw)
+{
+ u16 phy_data;
+ s32 rc;
+
+ /* Resolve master/slave mode */
+ rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_1000T_CTRL, 0,
+ &phy_data);
+ if (rc)
+ return rc;
+
+ /* load defaults for future use */
+ if (phy_data & IXGBE_M88E1500_1000T_CTRL_MS_ENABLE) {
+ if (phy_data & IXGBE_M88E1500_1000T_CTRL_MS_VALUE)
+ hw->phy.original_ms_type = ixgbe_ms_force_master;
+ else
+ hw->phy.original_ms_type = ixgbe_ms_force_slave;
+ } else {
+ hw->phy.original_ms_type = ixgbe_ms_auto;
+ }
+
+ switch (hw->phy.ms_type) {
+ case ixgbe_ms_force_master:
+ phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
+ phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_VALUE;
+ break;
+ case ixgbe_ms_force_slave:
+ phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
+ phy_data &= ~IXGBE_M88E1500_1000T_CTRL_MS_VALUE;
+ break;
+ case ixgbe_ms_auto:
+ phy_data &= ~IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
+ break;
+ default:
+ break;
+ }
+
+ return ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_1000T_CTRL, 0,
+ phy_data);
+}
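The master/slave resolution above hinges on two control bits: MS_ENABLE selects manual mode, and MS_VALUE then picks master versus slave; with MS_ENABLE clear the PHY negotiates automatically. A standalone decode sketch; the bit positions below follow the standard 1000BASE-T control register (register 9, bits 12 and 11) and are assumptions, since the IXGBE_M88E1500_1000T_CTRL_* values are not visible in this hunk:

#include <stdint.h>
#include <stdio.h>

#define MS_ENABLE (1u << 12) /* assumed manual master/slave enable bit */
#define MS_VALUE  (1u << 11) /* assumed master(1)/slave(0) value bit   */

static const char *decode_ms(uint16_t ctrl)
{
	if (!(ctrl & MS_ENABLE))
		return "auto";
	return (ctrl & MS_VALUE) ? "force-master" : "force-slave";
}

int main(void)
{
	printf("%s\n", decode_ms(0));                    /* auto         */
	printf("%s\n", decode_ms(MS_ENABLE | MS_VALUE)); /* force-master */
	printf("%s\n", decode_ms(MS_ENABLE));            /* force-slave  */
	return 0;
}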
+
+/**
+ * ixgbe_reset_phy_m88_nolock - Reset m88 PHY without locking
+ * @hw: pointer to hardware structure
+ *
+ * Must be called while holding the PHY semaphore and token
+ */
+STATIC s32 ixgbe_reset_phy_m88_nolock(struct ixgbe_hw *hw)
+{
+ s32 rc;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_FIBER_CTRL, 0,
+ IXGBE_M88E1500_FIBER_CTRL_RESET |
+ IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
+ IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB);
+ if (rc)
+ goto res_out;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 18);
+ if (rc)
+ goto res_out;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_GEN_CTRL, 0,
+ IXGBE_M88E1500_GEN_CTRL_RESET |
+ IXGBE_M88E1500_GEN_CTRL_SGMII_COPPER);
+ if (rc)
+ goto res_out;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ if (rc)
+ goto res_out;
+
+ rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
+ IXGBE_M88E1500_COPPER_CTRL_RESET |
+ IXGBE_M88E1500_COPPER_CTRL_AN_EN |
+ IXGBE_M88E1500_COPPER_CTRL_RESTART_AN |
+ IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX |
+ IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB);
+
+res_out:
+ ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ return rc;
+}
+
+/**
+ * ixgbe_reset_phy_m88 - Reset m88 PHY
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_reset_phy_m88(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ s32 rc;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_reset_phy_m88_nolock(hw);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return rc;
+}
+
+/**
+ * ixgbe_setup_m88 - setup m88 PHY
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_setup_m88(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ u16 phy_data;
+ s32 rc;
+
+ if (phy->reset_disable || ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_PHY_SPEC_CTRL, 0,
+ &phy_data);
+ if (rc)
+ goto rel_out;
+
+ /* Enable downshift and set it to 6X */
+ phy_data &= ~IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE;
+ phy_data |= IXGBE_M88E1500_PSCR_DOWNSHIFT_6X;
+ phy_data |= IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE;
+ rc = ixgbe_write_phy_reg_mdi_22(hw,
+ IXGBE_M88E1500_PHY_SPEC_CTRL, 0,
+ phy_data);
+ if (rc)
+ goto rel_out;
+
+ ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+
+ /* Commit the changes */
+ rc = ixgbe_reset_phy_m88_nolock(hw);
+ if (rc) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ goto rel_out;
+ }
+
+ rc = ixgbe_set_master_slave_mode(hw);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return rc;
+
+rel_out:
+ ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ return rc;
+}
+
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read the NW_MNG_IF_SEL register, saving the field values used to
+ * determine the internal PHY mode.
+ **/
+STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If this is X552 (X550EM_a) and the MDIO is connected to an external
+ * PHY, then set the PHY address. This register field has only been
+ * used on X552.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_a &&
+ hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
+ hw->phy.addr = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ }
+
+ return IXGBE_SUCCESS;
+}
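The helper above is a plain mask-and-shift of the 5-bit MDIO PHY address field (bits 7:3) defined in the ixgbe_type.h hunk earlier in this patch. A standalone illustration with a hypothetical register readout:

#include <stdint.h>
#include <stdio.h>

/* Field definitions as added to ixgbe_type.h in this patch. */
#define NW_MNG_IF_SEL_MDIO_ACT           (1u << 1)
#define NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
#define NW_MNG_IF_SEL_MDIO_PHY_ADD       (0x1Fu << NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)

int main(void)
{
	uint32_t reg = 0xAA; /* hypothetical NW_MNG_IF_SEL readout */

	if (reg & NW_MNG_IF_SEL_MDIO_ACT) {
		uint32_t addr = (reg & NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
				NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
		/* bits 7:3 of 0xAA (1010 1010b) -> PHY address 21 */
		printf("external PHY address = %u\n", (unsigned int)addr);
	}
	return 0;
}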
+
+/**
* ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1688,14 +2174,11 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode.
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
}
@@ -1722,11 +2205,6 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
case ixgbe_phy_x550em_ext_t:
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
-
/* If internal link mode is XFI, then setup iXFI internal link,
* else setup KR now.
*/
@@ -1742,6 +2220,15 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
+ case ixgbe_phy_sgmii:
+ phy->ops.setup_link = NULL;
+ break;
+ case ixgbe_phy_m88:
+ phy->ops.setup_link = ixgbe_setup_m88;
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
+ phy->ops.reset = ixgbe_reset_phy_m88;
+ break;
default:
break;
}
@@ -1752,12 +2239,14 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
* ixgbe_set_mdio_speed - Set MDIO clock speed
* @hw: pointer to hardware structure
*/
-static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
{
u32 hlreg0;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
@@ -1933,10 +2422,13 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
* ixgbe_setup_kr_x550em - Configure the KR PHY.
* @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY.
+ * Configures the integrated KR PHY for X550EM_x.
**/
s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
+ if (hw->mac.type != ixgbe_mac_X550EM_x)
+ return IXGBE_SUCCESS;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -2019,6 +2511,99 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: unused in this implementation
+ *
+ * Configure the integrated PHY for SFP support.
+ **/
+s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ s32 ret_val;
+ u16 reg_phy_ext;
+ bool setup_linear = false;
+ u32 reg_slice, reg_phy_int, slice_offset;
+
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* Check if SFP module is supported and linear */
+ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: the SFP-not-present
+ * error is acceptable in the setup MAC link flow.
+ */
+ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
+ return IXGBE_SUCCESS;
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
+ /* Configure internal PHY for native SFI */
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ if (setup_linear) {
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
+ } else {
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
+ }
+
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* Setup XFI/SFI internal link. */
+ ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
+ } else {
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) {
+ /* No valid PHY address was found */
+ DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n");
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ }
+
+ /* Get external PHY device id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* When configuring quad port CS4223, the MAC instance is part
+ * of the slice offset.
+ */
+ if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ slice_offset = (hw->bus.lan_id +
+ (hw->bus.instance_id << 1)) << 12;
+ else
+ slice_offset = hw->bus.lan_id << 12;
+
+ /* Configure CS4227/CS4223 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+ if (setup_linear)
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
+ ret_val = hw->phy.ops.write_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+ }
+ return ret_val;
+}
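The slice-offset arithmetic in the CS4223 branch is easy to verify by hand; the following comment only restates the computation above with concrete values:

/* Worked example of the slice computation above:
 *   CS4223 (quad), lan_id = 1, instance_id = 1:
 *     slice_offset = (1 + (1 << 1)) << 12 = 0x3000
 *   CS4227 (dual), lan_id = 1:
 *     slice_offset = 1 << 12 = 0x1000
 * so the LINE-side write lands at
 * IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset.
 */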
+
+/**
* ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
* @hw: pointer to hardware structure
*
@@ -2261,57 +2846,57 @@ s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
u32 reg_val;
/* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Set near-end loopback clocks. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Set loopback enable. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Training bypass. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
}
@@ -3147,24 +3732,30 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
goto out;
}
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
- ret_val = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (ret_val != IXGBE_SUCCESS)
goto out;
reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
- IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
if (pause)
reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
if (asm_dir)
reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
- ret_val = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* This device does not fully support AN. */
hw->fc.disable_fc_autoneg = true;
+ break;
+ default:
+ break;
}
out:
@@ -3172,6 +3763,183 @@ out:
}
/**
+ * ixgbe_fc_autoneg_x550a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw)
+{
+ u32 link_s1, lp_an_page_low, an_cntl_1;
+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ /* Check that auto-negotiation has completed */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_S1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
+
+ if (status != IXGBE_SUCCESS ||
+ (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ /* Read the 10G AN advertisement and LP ability registers and resolve
+ * local flow control settings accordingly
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ goto out;
+ }
+
+ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
+ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
+ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_setup_fc_x550a - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_SUCCESS;
+ u32 an_cntl, link_ctrl = 0;
+
+ DEBUGFUNC("ixgbe_setup_fc_x550a");
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Set up the 1G and 10G flow control advertisement registers so the
+ * HW will be able to do FC autoneg once the cable is plugged in. If
+ * we link at 10G, the 1G advertisement is harmless and vice versa.
+ */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ return status;
+ }
+
+ /* The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ break;
+ default:
+ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+ "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
+
+ /* Restart auto-negotiation. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
+
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ return status;
+ }
+
+ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
+
+ return status;
+}
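For reference, the requested_mode switch above resolves to the following IEEE clause 37 advertisement bits; this table only restates the code:

/*   requested_mode     SYM_PAUSE  ASM_PAUSE
 *   ixgbe_fc_none          0          0
 *   ixgbe_fc_tx_pause      0          1
 *   ixgbe_fc_rx_pause      1          1   (falls through to fc_full)
 *   ixgbe_fc_full          1          1
 */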
+
+/**
* ixgbe_set_mux - Set mux for port 1 access with CS4227
* @hw: pointer to hardware structure
* @state: set mux if 1, clear if 0
@@ -3238,7 +4006,7 @@ void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
*
* Acquires the SWFW semaphore and get the shared phy token as needed
*/
-static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
+STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
int retries = FW_PHY_TOKEN_RETRIES;
@@ -3247,17 +4015,22 @@ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
while (--retries) {
+ status = IXGBE_SUCCESS;
if (hmask)
status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
if (status)
- break;
+ return status;
if (!(mask & IXGBE_GSSR_TOKEN_SM))
- break;
+ return IXGBE_SUCCESS;
+
status = ixgbe_get_phy_token(hw);
- if (status != IXGBE_ERR_TOKEN_RETRY)
- break;
+ if (status == IXGBE_SUCCESS)
+ return IXGBE_SUCCESS;
+
if (hmask)
ixgbe_release_swfw_sync_X540(hw, hmask);
+ if (status != IXGBE_ERR_TOKEN_RETRY)
+ return status;
msec_delay(FW_PHY_TOKEN_DELAY);
}
@@ -3271,7 +4044,7 @@ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
*
* Releases the SWFW semaphore and puts the shared phy token as needed
*/
-static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
+STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
@@ -3285,6 +4058,63 @@ static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
}
/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ **/
+s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ s32 status;
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_read_phy_reg_x550a");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register using the SWFW lock and PHY Token.
+ * The PHY Token is needed since the MDIO is shared between two MAC instances.
+ **/
+s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ s32 status;
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+
+ DEBUGFUNC("ixgbe_write_phy_reg_x550a");
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ return status;
+}
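Callers never pass the PHY token explicitly; both x550a accessors fold IXGBE_GSSR_TOKEN_SM into the lock mask themselves. A hedged usage sketch; the register offset below is a placeholder, not a value taken from this patch:

u16 id;
s32 rc;

/* Read an external PHY register through the token-protected accessor.
 * 0x0002 is a placeholder offset, not from this patch.
 */
rc = ixgbe_read_phy_reg_x550a(hw, 0x0002, IXGBE_MDIO_ZERO_DEV_TYPE, &id);
if (rc == IXGBE_SUCCESS)
	DEBUGOUT1("PHY register = 0x%04x\n", id);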
+
+/**
* ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
* @hw: pointer to hardware structure
*
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h
index a8c0a678..27d5d02f 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.h
+++ b/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -36,6 +36,49 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_type.h"
+/* More phy definitions */
+#define IXGBE_M88E1500_COPPER_CTRL 0x0 /* Page 0 reg */
+#define IXGBE_M88E1500_COPPER_CTRL_RESET 0x8000
+#define IXGBE_M88E1500_COPPER_CTRL_AN_EN 0x1000
+#define IXGBE_M88E1500_COPPER_CTRL_RESTART_AN 0x0200
+#define IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX 0x0100
+#define IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB 0x0040
+#define IXGBE_M88E1500_1000T_CTRL 0x09 /* 1000Base-T Ctrl Reg */
+/* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define IXGBE_M88E1500_1000T_CTRL_MS_VALUE 0x0800
+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define IXGBE_M88E1500_1000T_CTRL_MS_ENABLE 0x1000
+#define IXGBE_M88E1500_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define IXGBE_M88E1500_AUTO_COPPER_SGMII 0x2
+#define IXGBE_M88E1500_AUTO_COPPER_BASEX 0x3
+#define IXGBE_M88E1500_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define IXGBE_M88E1500_MAC_CTRL_1 0x10
+#define IXGBE_M88E1500_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define IXGBE_M88E1500_CFG_REG_1 0x0010
+#define IXGBE_M88E1500_CFG_REG_2 0x0011
+#define IXGBE_M88E1500_CFG_REG_3 0x0007
+#define IXGBE_M88E1500_MODE 0x0014
+#define IXGBE_M88E1500_PAGE_ADDR 0x16 /* Page Offset reg */
+#define IXGBE_M88E1500_FIBER_CTRL 0x0 /* Page 1 reg */
+#define IXGBE_M88E1500_FIBER_CTRL_RESET 0x8000
+#define IXGBE_M88E1500_FIBER_CTRL_SPEED_LSB 0x2000
+#define IXGBE_M88E1500_FIBER_CTRL_POWER_DOWN 0x0800
+#define IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL 0x0100
+#define IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB 0x0040
+#define IXGBE_M88E1500_EEE_CTRL_1 0x0 /* Page 18 reg */
+#define IXGBE_M88E1500_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define IXGBE_M88E1500_GEN_CTRL 0x14 /* Page 18 reg */
+#define IXGBE_M88E1500_GEN_CTRL_RESET 0x8000
+#define IXGBE_M88E1500_GEN_CTRL_SGMII_COPPER 0x0001 /* Mode bits 0-2 */
+
+/* M88E1500 Specific Registers */
+#define IXGBE_M88E1500_PHY_SPEC_CTRL 0x10 /* PHY Specific Ctrl Reg */
+#define IXGBE_M88E1500_PHY_SPEC_STATUS 0x11 /* PHY Specific Stat Reg */
+
+#define IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define IXGBE_M88E1500_PSCR_DOWNSHIFT_MASK 0x7000
+#define IXGBE_M88E1500_PSCR_DOWNSHIFT_6X 0x5000
+
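Most of the registers above sit behind the M88E1500's page-select scheme: register 22 (IXGBE_M88E1500_PAGE_ADDR) picks the page, and the per-page offsets only make sense after switching. A minimal sketch assuming the mdi_22 helpers from ixgbe_x550.c, with error handling omitted:

u16 fiber_ctrl;

ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_FIBER_CTRL, 0, &fiber_ctrl);
/* Restore page 0, as the driver code does after every paged access. */
ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);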
s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
@@ -100,6 +143,15 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw);
s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
diff --git a/drivers/net/ixgbe/ixgbe_82599_bypass.c b/drivers/net/ixgbe/ixgbe_82599_bypass.c
index 21c42eac..de9fa5a7 100644
--- a/drivers/net/ixgbe/ixgbe_82599_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_82599_bypass.c
@@ -297,8 +297,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
{
int rc;
- if ((rc = ixgbe_init_hw(hw)) == 0 &&
- hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ rc = ixgbe_init_hw(hw);
+ if (rc == 0 && hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
hw->mac.ops.setup_link =
&ixgbe_setup_mac_link_multispeed_fixed_fiber;
@@ -306,8 +306,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
hw->mac.ops.disable_tx_laser = NULL;
- hw->mac.ops.enable_tx_laser = NULL;
- hw->mac.ops.flap_tx_laser = NULL;
+ hw->mac.ops.enable_tx_laser = NULL;
+ hw->mac.ops.flap_tx_laser = NULL;
}
return rc;
diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c
index 73f608b9..70069284 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.c
+++ b/drivers/net/ixgbe/ixgbe_bypass.c
@@ -82,7 +82,7 @@ ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
BYPASS_CTL1_VALID_M |
BYPASS_CTL1_OFFTRST_M;
value = (sec & BYPASS_CTL1_TIME_M) |
- BYPASS_CTL1_VALID |
+ BYPASS_CTL1_VALID |
BYPASS_CTL1_OFFTRST;
FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
@@ -275,8 +275,8 @@ s32
ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
{
struct ixgbe_hw *hw;
- u32 status;
- u32 mask;
+ u32 status;
+ u32 mask;
s32 ret_val;
struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h
index fcd97743..5f5c63e3 100644
--- a/drivers/net/ixgbe/ixgbe_bypass.h
+++ b/drivers/net/ixgbe/ixgbe_bypass.h
@@ -37,10 +37,10 @@
#ifdef RTE_NIC_BYPASS
struct ixgbe_bypass_mac_ops {
- s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status);
- bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg);
- s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
- s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value);
+ s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status);
+ bool (*bypass_valid_rd)(u32 in_reg, u32 out_reg);
+ s32 (*bypass_set)(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
+ s32 (*bypass_rd_eep)(struct ixgbe_hw *hw, u32 addr, u8 *value);
};
struct ixgbe_bypass_info {
diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h
index 22570acf..cafcb278 100644
--- a/drivers/net/ixgbe/ixgbe_bypass_defines.h
+++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h
@@ -136,7 +136,7 @@ enum ixgbe_state_t {
#define BYPASS_LOG_EVENT_SHIFT 28
#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */
#define IXGBE_DEV_TO_ADPATER(dev) \
- ((struct ixgbe_adapter*)(dev->data->dev_private))
+ ((struct ixgbe_adapter *)(dev->data->dev_private))
/* extractions from ixgbe_phy.h */
#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3f1ebc15..0629b426 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -150,6 +150,7 @@
#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG 0x00000004
+#define IXGBE_VTEICR_MASK 0x07
enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_NONE = 0,
@@ -157,6 +158,9 @@ enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_ALLMULTI,
};
+#define IXGBE_EXVET_VET_EXT_SHIFT 16
+#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
+
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
@@ -174,11 +178,15 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
@@ -232,7 +240,7 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@@ -264,14 +272,14 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
- ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+ ether_addr * mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask,uint8_t vlan_on);
+ uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
@@ -361,6 +369,8 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
const struct timespec *timestamp);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
static int ixgbe_dev_l2_tunnel_eth_type_conf
(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
@@ -397,21 +407,21 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
last = latest; \
}
-#define IXGBE_SET_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] |= 1 << bit;\
} while (0)
-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)
-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
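The idx/bit arithmetic in these macros is ordinary bitmap addressing: with 32-bit bitmap words (sizeof((h)->bitmap[0]) * NBBY = 32), queue 37 lands in word 1, bit 5. A standalone check:

#include <stdint.h>
#include <stdio.h>

#define NBBY 8 /* bits per byte, as in the driver */

int main(void)
{
	uint32_t bitmap[4] = {0};
	unsigned q = 37;
	unsigned idx = q / (sizeof(bitmap[0]) * NBBY); /* 37 / 32 = 1 */
	unsigned bit = q % (sizeof(bitmap[0]) * NBBY); /* 37 % 32 = 5 */

	bitmap[idx] |= 1u << bit;
	printf("idx=%u bit=%u word=0x%08x\n", idx, bit, (unsigned)bitmap[idx]);
	return 0; /* prints idx=1 bit=5 word=0x00000020 */
}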
@@ -466,6 +476,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.xstats_get = ixgbe_dev_xstats_get,
.stats_reset = ixgbe_dev_stats_reset,
.xstats_reset = ixgbe_dev_xstats_reset,
+ .xstats_get_names = ixgbe_dev_xstats_get_names,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
.dev_infos_get = ixgbe_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
@@ -555,6 +566,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
.xstats_reset = ixgbevf_dev_stats_reset,
+ .xstats_get_names = ixgbevf_dev_xstats_get_names,
.dev_close = ixgbevf_dev_close,
.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
@@ -685,6 +697,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
sizeof(rte_ixgbe_rxq_strings[0]))
+#define IXGBE_NB_RXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
@@ -695,6 +708,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
sizeof(rte_ixgbe_txq_strings[0]))
+#define IXGBE_NB_TXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
@@ -901,8 +915,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
- }
- else {
+ } else {
PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
@@ -911,7 +924,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
}
static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
struct ixgbe_stat_mapping_registers *stat_mappings =
IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
@@ -929,7 +942,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
}
static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
uint8_t i;
struct ixgbe_dcb_tc_config *tc;
@@ -952,7 +965,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
tc = &dcb_config->tc_config[0];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
- for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
}
@@ -1016,7 +1029,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1039,17 +1052,18 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
* has already done this work. Only check we don't need a different
* RX and TX function.
*/
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
- * Tx queue may not initialized by primary process */
+ * Tx queue may not be initialized by the primary process
+ */
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
- "Using default TX function.");
+ "Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
@@ -1086,7 +1100,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
- ixgbe_dcb_init(hw,dcb_config);
+ ixgbe_dcb_init(hw, dcb_config);
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full;
@@ -1127,11 +1141,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
- "LOM. Please be aware there may be issues associated "
- "with your hardware.");
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.");
PMD_INIT_LOG(ERR, "If you are experiencing problems "
- "please contact your Intel or hardware representative "
- "who provided you with this hardware.");
+ "please contact your Intel or hardware representative "
+ "who provided you with this hardware.");
} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
if (diag) {
@@ -1150,12 +1164,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
@@ -1164,11 +1178,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing hash filter MAC addresses */
eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- IXGBE_VMDQ_NUM_UC_MAC, 0);
+ IXGBE_VMDQ_NUM_UC_MAC, 0);
if (eth_dev->data->hash_mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
return -ENOMEM;
}
@@ -1198,8 +1212,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
(int) hw->mac.type, (int) hw->phy.type);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
- eth_dev->data->port_id, pci_dev->id.vendor_id,
- pci_dev->id.device_id);
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
rte_intr_callback_register(&pci_dev->intr_handle,
ixgbe_dev_interrupt_handler,
@@ -1214,7 +1228,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
return 0;
}
@@ -1313,7 +1327,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1327,8 +1341,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
- * RX function */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not initialized by primary process
@@ -1339,7 +1354,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE,
- "No TX queues configured yet. Using default TX function.");
+ "No TX queues configured yet. Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
@@ -1398,12 +1413,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
@@ -1433,14 +1448,20 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* reset the hardware with the new settings */
diag = hw->mac.ops.start_hw(hw);
switch (diag) {
- case 0:
- break;
+ case 0:
+ break;
- default:
- PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return -EIO;
+ default:
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return -EIO;
}
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ ixgbevf_dev_interrupt_handler,
+ (void *)eth_dev);
+ rte_intr_enable(&pci_dev->intr_handle);
+ ixgbevf_intr_enable(hw);
+
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "ixgbe_mac_82599_vf");
@@ -1454,6 +1475,7 @@ static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct ixgbe_hw *hw;
+ struct rte_pci_device *pci_dev = eth_dev->pci_dev;
PMD_INIT_FUNC_TRACE();
@@ -1475,6 +1497,11 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ ixgbevf_dev_interrupt_handler,
+ (void *)eth_dev);
+
return 0;
}
@@ -1537,7 +1564,7 @@ ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vfta;
uint32_t vid_idx;
@@ -1575,15 +1602,47 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int ret = 0;
+ uint32_t reg;
+ uint32_t qinq;
+
+ qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ qinq &= IXGBE_DMATXCTL_GDV;
switch (vlan_type) {
case ETH_VLAN_TYPE_INNER:
- /* Only the high 16-bits is valid */
- IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+ if (qinq) {
+ reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+ | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+ } else {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Inner type is not supported"
+ " by single VLAN");
+ }
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ if (qinq) {
+ /* Only the high 16 bits are valid */
+ IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
+ IXGBE_EXVET_VET_EXT_SHIFT);
+ } else {
+ reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+ | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+ }
+
break;
default:
ret = -EINVAL;
- PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
break;
}
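A quick numeric check of the ETH_VLAN_TYPE_OUTER branch above, using the shift defined earlier in this file:

/*   tpid = 0x88A8 (802.1ad), IXGBE_EXVET_VET_EXT_SHIFT = 16
 *     => IXGBE_WRITE_REG(hw, IXGBE_EXVET, 0x88A80000)
 * i.e. the TPID occupies the high 16 bits of EXVET when QinQ is enabled.
 */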
@@ -1611,7 +1670,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vlnctrl;
uint16_t i;
@@ -1635,6 +1694,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+ struct ixgbe_rx_queue *rxq;
if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
return;
@@ -1643,6 +1703,16 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
IXGBE_SET_HWSTRIP(hwstrip, queue);
else
IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+ if (queue >= dev->data->nb_rx_queues)
+ return;
+
+ rxq = dev->data->rx_queues[queue];
+
+ if (on)
+ rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ else
+ rxq->vlan_flags = PKT_RX_VLAN_PKT;
}
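The cached per-queue vlan_flags are OR'd into each received mbuf's ol_flags by the RX path, so applications can tell whether the tag was stripped into vlan_tci. A hedged consumer-side sketch against the 16.07-era mbuf flags:

#include <stdio.h>
#include <rte_mbuf.h>

/* Hedged sketch of the consumer side; assumes a received mbuf m. */
static void show_vlan(const struct rte_mbuf *m)
{
	if (!(m->ol_flags & PKT_RX_VLAN_PKT))
		return;
	if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
		printf("VLAN tag %u stripped by HW\n", m->vlan_tci);
	else
		printf("VLAN tagged, tag left in packet data\n");
}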
static void
@@ -1659,12 +1729,12 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
@@ -1683,12 +1753,12 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
@@ -1707,8 +1777,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl &= ~IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@@ -1735,8 +1804,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@@ -1836,6 +1904,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
@@ -2116,7 +2185,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
ixgbe_stop_adapter(hw);
/* reinitialize adapter
- * this calls reset and start */
+ * this calls reset and start
+ */
status = ixgbe_pf_reset_hw(hw);
if (status != 0)
return -1;
@@ -2251,7 +2321,7 @@ skip_link_setup:
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbe_vlan_offload_set(dev, mask);
@@ -2471,8 +2541,8 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
for (i = 0; i < 8; i++) {
- uint32_t mp;
- mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
/* global total per queue */
hw_stats->mpc[i] += mp;
/* Running comprehensive total for stats display */
@@ -2664,15 +2734,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Rx Errors */
stats->imissed = total_missed_rx;
stats->ierrors = hw_stats->crcerrs +
- hw_stats->mspdc +
- hw_stats->rlec +
- hw_stats->ruc +
- hw_stats->roc +
- hw_stats->illerrc +
- hw_stats->errbc +
- hw_stats->rfc +
- hw_stats->fccrc +
- hw_stats->fclast;
+ hw_stats->mspdc +
+ hw_stats->rlec +
+ hw_stats->ruc +
+ hw_stats->roc +
+ hw_stats->illerrc +
+ hw_stats->errbc +
+ hw_stats->rfc +
+ hw_stats->fccrc +
+ hw_stats->fclast;
/* Tx Errors */
stats->oerrors = 0;
@@ -2694,12 +2764,76 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
/* This function calculates the number of xstats based on the current config */
static unsigned
ixgbe_xstats_calc_num(void) {
- return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
- (IXGBE_NB_TXQ_PRIO_STATS * 8);
+ return IXGBE_NB_HW_STATS +
+ (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
+ (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
+}
+
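The count is mechanical; the comment below works it through with illustrative table sizes (not taken from this file):

/* Example with illustrative sizes:
 *   IXGBE_NB_HW_STATS = 50, IXGBE_NB_RXQ_PRIO_STATS = 2,
 *   IXGBE_NB_TXQ_PRIO_STATS = 2
 *   => 50 + 2 * 8 + 2 * 8 = 82 xstats
 */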
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+ const unsigned cnt_stats = ixgbe_xstats_calc_num();
+ unsigned stat, i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_get_names()
+ */
+
+ /* Extended stats from ixgbe_hw_stats */
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_stats_strings[i].name);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ count++;
+ }
+ }
+ }
+ return cnt_stats;
+}
+
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+ unsigned i;
+
+ if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
+ return -ENOMEM;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_ixgbevf_stats_strings[i].name);
+ return IXGBEVF_NB_XSTATS;
}
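Both _get_names handlers return the total count when the names array is NULL, which is what enables the usual two-call pattern at the ethdev API level. A hedged sketch against the 16.07 API, with error handling trimmed:

#include <stdlib.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Hedged sketch of the two-call pattern; assumes the port is configured. */
static void dump_xstat_names(uint8_t port)
{
	int n = rte_eth_xstats_get_names(port, NULL, 0);
	struct rte_eth_xstat_name *names;
	int i;

	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	if (names == NULL)
		return;
	if (rte_eth_xstats_get_names(port, names, n) == n)
		for (i = 0; i < n; i++)
			printf("%s\n", names[i].name);
	free(names);
}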
static int
-ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct ixgbe_hw *hw =
@@ -2731,8 +2865,6 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* Extended stats from ixgbe_hw_stats */
count = 0;
for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
- rte_ixgbe_stats_strings[i].name);
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_stats_strings[i].offset);
count++;
@@ -2740,10 +2872,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
- for (i = 0; i < 8; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_priority%u_%s", i,
- rte_ixgbe_rxq_strings[stat].name);
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_rxq_strings[stat].offset +
(sizeof(uint64_t) * i));
@@ -2753,17 +2882,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* TX Priority Stats */
for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
- for (i = 0; i < 8; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_priority%u_%s", i,
- rte_ixgbe_txq_strings[stat].name);
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_txq_strings[stat].offset +
(sizeof(uint64_t) * i));
count++;
}
}
-
return count;
}
@@ -2786,7 +2911,7 @@ static void
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Good Rx packet, include VF loopback */
@@ -2811,7 +2936,7 @@ ixgbevf_update_stats(struct rte_eth_dev *dev)
}
static int
-ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -2828,8 +2953,6 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
/* Extended stats */
for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
- snprintf(xstats[i].name, sizeof(xstats[i].name),
- "%s", rte_ixgbevf_stats_strings[i].name);
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbevf_stats_strings[i].offset);
}
@@ -2852,14 +2975,12 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->ibytes = hw_stats->vfgorc;
stats->opackets = hw_stats->vfgptc;
stats->obytes = hw_stats->vfgotc;
- stats->imcasts = hw_stats->vfmprc;
- /* stats->imcasts should be removed as imcasts is deprecated */
}
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Sync HW register to the last stats */
@@ -2870,8 +2991,6 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
hw_stats->vfgorc = 0;
hw_stats->vfgptc = 0;
hw_stats->vfgotc = 0;
- hw_stats->vfmprc = 0;
-
}
static void
@@ -3356,7 +3475,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
if (intr_enable_delay) {
if (rte_eal_alarm_set(timeout * 1000,
- ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
} else {
PMD_DRV_LOG(DEBUG, "enable intr immediately");
@@ -3575,7 +3694,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* Enable flow control according to the current settings.
*/
static int
-ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
int ret_val = 0;
uint32_t mflcn_reg, fccfg_reg;
@@ -3622,13 +3741,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_rx_pause:
/*
@@ -3645,20 +3764,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_tx_pause:
/*
* Tx Flow control is enabled, and Rx Flow control is
* disabled by software override.
*/
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
@@ -3669,7 +3788,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
ret_val = IXGBE_ERR_CONFIG;
goto out;
- break;
}
/* Set 802.3x based flow control settings. */
@@ -3708,13 +3826,13 @@ out:
}
static int
-ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
if (hw->mac.type != ixgbe_mac_82598EB) {
- ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+ ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
}
return ret_val;
}
@@ -3728,9 +3846,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
uint8_t tc_num;
uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_dcb_config *dcb_config =
- IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
ixgbe_fc_none,
@@ -3763,7 +3881,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
- err = ixgbe_dcb_pfc_enable(dev,tc_num);
+ err = ixgbe_dcb_pfc_enable(dev, tc_num);
/* Not negotiated is not an error case */
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
@@ -3911,7 +4029,8 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
+ * feature has not been enabled before.
+ */
if (!dev->data->scattered_rx &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
@@ -3971,7 +4090,7 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
- struct rte_eth_conf* conf = &dev->data->dev_conf;
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
@@ -4033,10 +4152,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
}
/* Set vfta */
- ixgbevf_set_vfta_all(dev,1);
+ ixgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbevf_vlan_offload_set(dev, mask);
@@ -4077,6 +4196,8 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ ixgbevf_intr_disable(hw);
+
hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
@@ -4084,7 +4205,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
* Clear what we set, but we still keep shadow_vfta to
* restore after device starts
*/
- ixgbevf_set_vfta_all(dev,0);
+ ixgbevf_set_vfta_all(dev, 0);
/* Clear stored conf */
dev->data->scattered_rx = 0;
@@ -4123,18 +4244,19 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
int i = 0, j = 0, vfta = 0, mask = 1;
- for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
vfta = shadow_vfta->vfta[i];
if (vfta) {
mask = 1;
- for (j = 0; j < 32; j++){
+ for (j = 0; j < 32; j++) {
if (vfta & mask)
- ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
- mask<<=1;
+ ixgbe_set_vfta(hw, (i<<5)+j, 0,
+ on, false);
+ mask <<= 1;
}
}
}
@@ -4146,7 +4268,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vid_idx = 0;
uint32_t vid_bit = 0;
@@ -4155,7 +4277,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
PMD_INIT_FUNC_TRACE();
/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
- ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
+ ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to set VF vlan");
return ret;
@@ -4191,7 +4313,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
ctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
static void
@@ -4207,7 +4329,7 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
for (i = 0; i < hw->mac.max_rx_queues; i++)
- ixgbevf_vlan_strip_queue_set(dev,i,on);
+ ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
@@ -4227,9 +4349,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
}
static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
{
uint32_t vector = 0;
+
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((uc_addr->addr_bytes[4] >> 4) |
@@ -4257,8 +4380,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
}
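
ixgbe_uta_vector() compresses a 48-bit unicast MAC into a 12-bit index for the 4096-entry unicast table array, with hw->mac.mc_filter_type choosing which 12-bit window of the address is used. The case-0 expression is cut off by the hunk boundary above; the sketch below reconstructs it from the upstream driver (bits [47:36], i.e. the high nibble of byte 4 plus byte 5) and should be read as a hedged illustration, not as part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Filter type 0: 12-bit UTA vector from MAC bits [47:36]. */
static uint32_t uta_vector_type0(const uint8_t mac[6])
{
	uint32_t vector = (mac[4] >> 4) | ((uint16_t)mac[5] << 4);

	return vector & 0xFFF;  /* clamp to the 4096-entry hash space */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };

	printf("uta vector: 0x%03x\n", uta_vector_type0(mac));  /* 0xefc */
	return 0;
}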
static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
- uint8_t on)
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint8_t on)
{
uint32_t vector;
uint32_t uta_idx;
@@ -4279,7 +4402,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
if (hw->mac.type < ixgbe_mac_82599EB)
return -ENOTSUP;
- vector = ixgbe_uta_vector(hw,mac_addr);
+ vector = ixgbe_uta_vector(hw, mac_addr);
uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
uta_shift = vector & ixgbe_uta_bit_mask;
@@ -4304,7 +4427,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
else
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
return 0;
}
@@ -4389,7 +4512,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
static int
ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
@@ -4399,16 +4522,26 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
if (ixgbe_vmdq_mode_check(hw) < 0)
return -ENOTSUP;
- addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+ if (pool >= ETH_64_POOLS)
+ return -EINVAL;
+
+ /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (pool >= 32) {
+ addr = IXGBE_VFRE(1);
+ val = bit1 << (pool - 32);
+ } else {
+ addr = IXGBE_VFRE(0);
+ val = bit1 << pool;
+ }
+
reg = IXGBE_READ_REG(hw, addr);
- val = bit1 << pool;
if (on)
reg |= val;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
@@ -4416,7 +4549,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
static int
ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
@@ -4426,16 +4559,26 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
if (ixgbe_vmdq_mode_check(hw) < 0)
return -ENOTSUP;
- addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+ if (pool >= ETH_64_POOLS)
+ return -EINVAL;
+
+ /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (pool >= 32) {
+ addr = IXGBE_VFTE(1);
+ val = bit1 << (pool - 32);
+ } else {
+ addr = IXGBE_VFTE(0);
+ val = bit1 << pool;
+ }
+
reg = IXGBE_READ_REG(hw, addr);
- val = bit1 << pool;
if (on)
reg |= val;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
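
Both hunks above replace the one-line register pick (pool >= ETH_64_POOLS/2) with an explicit range check and split, and the mapping is identical for RX (PFVFRE) and TX (PFVFTE): pools 0-31 land in register index 0, pools 32-63 in index 1, with the bit position taken modulo 32. A hedged sketch of that index/bit computation; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Map a pool number (0..63) to a register index and bit mask, mirroring
 * the PFVFRE/PFVFTE split introduced above; -1 plays the role of -EINVAL. */
static int pool_to_reg(uint16_t pool, unsigned int *reg_idx, uint32_t *bit)
{
	if (pool >= 64)
		return -1;
	*reg_idx = pool >= 32;     /* 0 for pools 0-31, 1 for 32-63 */
	*bit = 1u << (pool & 31);  /* pool modulo 32 */
	return 0;
}

int main(void)
{
	unsigned int idx;
	uint32_t bit;

	if (pool_to_reg(40, &idx, &bit) == 0)
		printf("pool 40 -> reg[%u] bit 0x%08x\n", idx, bit); /* reg[1] 0x100 */
	return 0;
}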
@@ -4453,7 +4596,8 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
return -ENOTSUP;
for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+ ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
+ vlan_on, false);
if (ret < 0)
return ret;
}
@@ -4475,7 +4619,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
- uint32_t mr_ctl,vlvf;
+ uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
uint32_t mv_msb = 0;
uint32_t mv_lsb = 0;
@@ -4488,7 +4632,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
const uint8_t vlan_mask_offset = 32;
const uint8_t dst_pool_offset = 8;
const uint8_t rule_mr_offset = 4;
- const uint8_t mirror_rule_mask= 0x0F;
+ const uint8_t mirror_rule_mask = 0x0F;
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
@@ -4511,11 +4655,12 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
/* Check if vlan id is valid and find corresponding VLAN ID index in VLVF */
- for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+ for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
/* search vlan id related pool vlan filter index */
reg_index = ixgbe_find_vlvf_slot(hw,
- mirror_conf->vlan.vlan_id[i]);
+ mirror_conf->vlan.vlan_id[i],
+ false);
if (reg_index < 0)
return -EINVAL;
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
@@ -4800,6 +4945,9 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
uint32_t q_idx;
uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+ /* Configure VF other cause ivar */
+ ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+
/* won't configure msix register if no mapping is done
* between intr vector and event fd.
*/
@@ -4814,9 +4962,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
intr_handle->intr_vec[q_idx] = vector_idx;
}
-
- /* Configure VF other cause ivar */
- ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
}
/**
@@ -5306,7 +5451,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
+ * feature has not been enabled before.
+ */
if (!dev->data->scattered_rx &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
@@ -7135,6 +7281,67 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
}
+static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 in_msg = 0;
+
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
+
+ /* PF reset VF event */
+ if (in_msg == IXGBE_PF_CONTROL_MSG)
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+}
+
+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ ixgbevf_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ intr->flags = 0;
+
+ /* only one misc vector supported - mailbox */
+ eicr &= IXGBE_VTEICR_MASK;
+ if (eicr == IXGBE_MISC_VEC_ID)
+ intr->flags |= IXGBE_FLAG_MAILBOX;
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (intr->flags & IXGBE_FLAG_MAILBOX) {
+ ixgbevf_mbx_process(dev);
+ intr->flags &= ~IXGBE_FLAG_MAILBOX;
+ }
+
+ ixgbevf_intr_enable(hw);
+
+ return 0;
+}
+
+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+ ixgbevf_dev_interrupt_get_status(dev);
+ ixgbevf_dev_interrupt_action(dev);
+}
+
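
The new VF handler follows the usual PMD interrupt pattern: disable, read-and-clear status, dispatch the mailbox event, re-enable. Attaching it to the device's interrupt handle is not part of this hunk and would typically happen in the VF init path; what follows is a hedged sketch of that registration using the public rte_intr_callback_register()/rte_intr_enable() API, where the helper name and its placement are assumptions and the DPDK headers are taken as already included by the driver:

/* Assumed init-path helper, not taken from this patch: hook up the
 * VF mailbox/misc interrupt handler added above. */
static void
ixgbevf_attach_intr_sketch(struct rte_eth_dev *eth_dev,
			   struct rte_intr_handle *intr_handle)
{
	/* eth_dev comes back to the handler as its param argument */
	rte_intr_callback_register(intr_handle,
				   ixgbevf_dev_interrupt_handler,
				   (void *)eth_dev);
	rte_intr_enable(intr_handle);  /* unmask on the host side */
}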
static struct rte_driver rte_ixgbe_driver = {
.type = PMD_PDEV,
.init = rte_ixgbe_pmd_init,
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 2e4c353a..861c7cbe 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -189,14 +189,13 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
- PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
- "during enabling!");
+ PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
return -ETIMEDOUT;
}
return 0;
@@ -282,6 +281,7 @@ static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
uint32_t mask = hi_dword << 16;
+
mask |= lo_dword;
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
@@ -810,8 +810,10 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
/* Process bits 0 and 16 */
- if (key & 0x0001) hash_result ^= lo_hash_dword;
- if (key & 0x00010000) hash_result ^= hi_hash_dword;
+ if (key & 0x0001)
+ hash_result ^= lo_hash_dword;
+ if (key & 0x00010000)
+ hash_result ^= hi_hash_dword;
/*
* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
@@ -822,9 +824,11 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
/* process the remaining 30 bits in the key 2 bits at a time */
- for (i = 15; i; i-- ) {
- if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
- if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ for (i = 15; i; i--) {
+ if (key & (0x0001 << i))
+ hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i))
+ hash_result ^= hi_hash_dword >> i;
}
return hash_result;
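
The restyled loop is the 82599 ATR common-hash fold: every set bit i in the low 16 bits of the key XORs in lo_hash_dword >> i, every set bit in the high 16 bits XORs in hi_hash_dword >> i, and the bit 0/16 statements above are simply the i = 0 case hoisted out of the loop. A self-contained sketch of the fold with arbitrary demo inputs; the dword preparation from the flow tuple is omitted:

#include <stdint.h>
#include <stdio.h>

/* Fold two hash dwords under a 32-bit key, one key bit per shift,
 * exactly as the loop above does (low half vs lo, high half vs hi). */
static uint32_t atr_fold(uint32_t key, uint32_t lo, uint32_t hi)
{
	uint32_t hash = 0;
	int i;

	for (i = 0; i < 16; i++) {
		if (key & (1u << i))
			hash ^= lo >> i;
		if (key & (1u << (i + 16)))
			hash ^= hi >> i;
	}
	return hash;
}

int main(void)
{
	/* arbitrary demo values, not real flow data or the real key */
	printf("0x%08x\n", atr_fold(0x600d600d, 0x12345678, 0x9abcdef0));
	return 0;
}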
@@ -1016,7 +1020,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
@@ -1080,9 +1084,9 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
*/
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter,
- bool del,
- bool update)
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
@@ -1092,7 +1096,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
bool is_perfect = FALSE;
int err;
struct ixgbe_hw_fdir_info *info =
- IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
if (fdir_mode == RTE_FDIR_MODE_NONE)
@@ -1109,12 +1113,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
(fdir_filter->input.flow_type ==
- RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+ RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
		PMD_DRV_LOG(ERR, "On this device,"
- " IPv4-other is not supported without"
- " L4 protocol and ports masked!");
+ " IPv4-other is not supported without"
+ " L4 protocol and ports masked!");
return -ENOTSUP;
}
@@ -1132,16 +1136,16 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
if (is_perfect) {
if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
- " perfect mode!");
+ " perfect mode!");
return -ENOTSUP;
}
fdirhash = atr_compute_perfect_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
+ dev->data->dev_conf.fdir_conf.pballoc);
fdirhash |= fdir_filter->soft_id <<
- IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
fdirhash = atr_compute_sig_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
+ dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
err = fdir_erase_filter_82599(hw, fdirhash);
@@ -1159,22 +1163,22 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
} else {
PMD_DRV_LOG(ERR, "Drop option is not supported in"
- " signature mode.");
+ " signature mode.");
return -EINVAL;
}
} else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
- fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
+ fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
queue = (uint8_t)fdir_filter->action.rx_queue;
else
return -EINVAL;
if (is_perfect) {
err = fdir_write_perfect_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash,
- fdir_mode);
+ fdircmd_flags, fdirhash,
+ fdir_mode);
} else {
err = fdir_add_signature_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash);
+ fdircmd_flags, fdirhash);
}
if (err < 0)
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
@@ -1269,22 +1273,22 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
- IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
uint32_t reg, max_num;
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
/* Get the information from registers */
reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
- IXGBE_FDIRFREE_COLL_SHIFT);
+ IXGBE_FDIRFREE_COLL_SHIFT);
info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
- IXGBE_FDIRFREE_FREE_SHIFT);
+ IXGBE_FDIRFREE_FREE_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
- IXGBE_FDIRLEN_MAXHASH_SHIFT);
+ IXGBE_FDIRLEN_MAXHASH_SHIFT);
info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
- IXGBE_FDIRLEN_MAXLEN_SHIFT);
+ IXGBE_FDIRLEN_MAXLEN_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
@@ -1310,10 +1314,10 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
- (reg & FDIRCTRL_PBALLOC_MASK)));
+ (reg & FDIRCTRL_PBALLOC_MASK)));
if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
- fdir_stats->guarant_cnt = max_num - fdir_stats->free;
+ fdir_stats->guarant_cnt = max_num - fdir_stats->free;
else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index a2787d90..56393ff2 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -97,9 +97,9 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
struct ixgbe_mirror_info *mirror_info =
- IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
struct ixgbe_uta_info *uta_info =
- IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
uint16_t vf_num;
@@ -108,15 +108,16 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
return;
*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
- memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
- memset(uta_info,0,sizeof(struct ixgbe_uta_info));
+ memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
+ memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
if (vf_num >= ETH_32_POOLS) {
@@ -141,8 +142,6 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
/* set mb interrupt mask */
ixgbe_mb_intr_setup(eth_dev);
-
- return;
}
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
@@ -220,7 +219,8 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
uint32_t vlanctrl;
int i;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
return -1;
/* enable VMDq and set the default pool for PF */
@@ -280,19 +280,18 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
}
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- /*
+ /*
* enable vlan filtering and allow all vlan tags through
*/
- vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
- /* VFTA - enable all vlan filters */
- for (i = 0; i < IXGBE_MAX_VFTA; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
- }
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IXGBE_MAX_VFTA; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
@@ -481,7 +480,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
- if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
}
@@ -545,7 +544,7 @@ ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
vfinfo[vf].vlan_count++;
else if (vfinfo[vf].vlan_count)
vfinfo[vf].vlan_count--;
- return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
+ return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}
static int
@@ -678,6 +677,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+
vfinfo[vf].clear_to_send = true;
return ret;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9fb38a6c..8a306b06 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -88,17 +88,6 @@
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
-
#if 1
#define RTE_PMD_USE_PREFETCH
#endif
@@ -352,6 +341,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_tx = 0;
while (nb_pkts) {
uint16_t ret, n;
+
n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
nb_tx = (uint16_t)(nb_tx + ret);
@@ -478,30 +468,28 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
*/
static inline uint32_t
what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
- union ixgbe_tx_offload tx_offload)
+ union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
- & tx_offload.data[0])) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
- & tx_offload.data[1])))) {
- return txq->ctx_curr;
- }
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
/* What if match with the next context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
- & tx_offload.data[0])) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
- & tx_offload.data[1])))) {
- return txq->ctx_curr;
- }
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
/* Mismatch, use the previous context */
return IXGBE_CTX_NUM;
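
The re-indentation leaves the logic untouched: the function probes a two-entry TX context cache, first the slot at ctx_curr, then, after flipping ctx_curr, the other slot, and a double miss returns IXGBE_CTX_NUM so the caller builds a fresh context descriptor. A reduced sketch of the two-slot probe, with the flags/offload comparison collapsed to single pre-masked words:

#include <stdint.h>

#define CTX_NUM 2  /* miss marker, standing in for IXGBE_CTX_NUM */

struct ctx_slot {
	uint64_t flags;
	uint64_t offload;  /* already masked, one word for brevity */
};

struct ctx_cache {
	struct ctx_slot slot[2];
	uint32_t curr;
};

/* Return the matching slot index, flipping curr on a first miss;
 * CTX_NUM means neither slot matches and one must be rebuilt. */
static uint32_t ctx_lookup(struct ctx_cache *c, uint64_t flags,
			   uint64_t offload)
{
	if (c->slot[c->curr].flags == flags &&
	    c->slot[c->curr].offload == offload)
		return c->curr;

	c->curr ^= 1;  /* try the other slot */
	if (c->slot[c->curr].flags == flags &&
	    c->slot[c->curr].offload == offload)
		return c->curr;

	return CTX_NUM;
}

int main(void)
{
	struct ctx_cache c = { { { 1, 0x10 }, { 2, 0x20 } }, 0 };

	return ctx_lookup(&c, 2, 0x20) == 1 ? 0 : 1;  /* hits slot 1 */
}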
@@ -511,6 +499,7 @@ static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
uint32_t tmp = 0;
+
if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
tmp |= IXGBE_ADVTXD_POPTS_TXSM;
if (ol_flags & PKT_TX_IP_CKSUM)
@@ -524,6 +513,7 @@ static inline uint32_t
tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
uint32_t cmdtype = 0;
+
if (ol_flags & PKT_TX_VLAN_PKT)
cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
@@ -561,8 +551,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
status = txr[desc_to_clean_to].wb.status;
- if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
- {
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
"(port=%d queue=%d)",
@@ -920,24 +909,40 @@ end_of_tx:
* RX functions
*
**********************************************************************/
-#define IXGBE_PACKET_TYPE_IPV4 0X01
-#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
-#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
-#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
-#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
-#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
-#define IXGBE_PACKET_TYPE_IPV6 0X04
-#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
-#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
-#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
-#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
-#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
-#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
-#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+
+#define IXGBE_PACKET_TYPE_ETHER 0X00
+#define IXGBE_PACKET_TYPE_IPV4 0X01
+#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
+#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
+#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
+#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13
+#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23
+#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
+#define IXGBE_PACKET_TYPE_IPV6 0X04
+#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
+#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44
+#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C
+#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F
+#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F
#define IXGBE_PACKET_TYPE_NVGRE 0X00
#define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01
@@ -945,13 +950,17 @@ end_of_tx:
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13
+#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43
#define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C
#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15
#define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25
@@ -965,13 +974,17 @@ end_of_tx:
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93
+#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3
#define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C
#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC
+#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95
#define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5
@@ -993,48 +1006,88 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
*/
static const uint32_t
ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER,
[IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4,
+ [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
[IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
[IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6,
- [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP,
[IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT,
- [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_INNER_L3_IPV6,
[IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
- [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
[IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
[IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
- [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
- [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
- RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] =
+ RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
};
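
The table is indexed directly by the descriptor's packet-type bits, so the lookup costs one load per packet; the macro values read as a small bit field (bit 0 IPv4, bit 1 IPv4 with options, bit 2 IPv6, bit 3 IPv6 with extensions, bits 4-6 TCP/UDP/SCTP), which is how the new SCTP and EXT entries slot in. A small sketch of that decoding; the flag names are illustrative, inferred from the macro values above:

#include <stdint.h>
#include <stdio.h>

/* Bit layout inferred from the IXGBE_PACKET_TYPE_* values above. */
#define PT_IPV4   0x01
#define PT_IPV4E  0x02  /* IPv4 with options ("EXT") */
#define PT_IPV6   0x04
#define PT_IPV6E  0x08  /* IPv6 with extension headers */
#define PT_TCP    0x10
#define PT_UDP    0x20
#define PT_SCTP   0x40

static void describe(uint8_t pt)
{
	printf("0x%02X:%s%s%s%s%s%s%s\n", pt,
	       pt & PT_IPV4  ? " IPv4"     : "",
	       pt & PT_IPV4E ? " IPv4-ext" : "",
	       pt & PT_IPV6  ? " IPv6"     : "",
	       pt & PT_IPV6E ? " IPv6-ext" : "",
	       pt & PT_TCP   ? " TCP"      : "",
	       pt & PT_UDP   ? " UDP"      : "",
	       pt & PT_SCTP  ? " SCTP"     : "");
}

int main(void)
{
	describe(0x11);  /* IPV4_TCP */
	describe(0x4D);  /* IPV4_IPV6_EXT_SCTP: IPv4 outer, IPv6-ext inner */
	return 0;
}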
static const uint32_t
@@ -1087,6 +1140,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 |
+ RTE_PTYPE_INNER_L4_SCTP,
[IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4,
@@ -1094,6 +1151,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT |
+ RTE_PTYPE_INNER_L4_SCTP,
[IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] =
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER |
@@ -1106,6 +1167,14 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE |
+ RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
[IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
@@ -1162,6 +1231,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP,
[IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
@@ -1170,6 +1243,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP,
[IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] =
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
@@ -1182,6 +1259,14 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
};
if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
@@ -1232,7 +1317,7 @@ ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
}
static inline uint64_t
-rx_desc_status_to_pkt_flags(uint32_t rx_status)
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
{
uint64_t pkt_flags;
@@ -1241,7 +1326,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
	 * Do not check whether the L3/L4 rx checksum was done by the NIC;
	 * that can be found from the rte_eth_rxmode.hw_ip_checksum flag.
*/
- pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+ pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
#ifdef RTE_LIBRTE_IEEE1588
if (rx_status & IXGBE_RXD_STAT_TMST)
@@ -1298,6 +1383,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
uint32_t pkt_info[LOOK_AHEAD];
int i, j, nb_rx = 0;
uint32_t status;
+ uint64_t vlan_flags = rxq->vlan_flags;
/* get references to current descriptor and S/W ring entry */
rxdp = &rxq->rx_ring[rxq->rx_tail];
@@ -1313,8 +1399,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
* reference packets that are ready to be received.
*/
for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
- i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
- {
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
@@ -1340,7 +1425,8 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
/* convert descriptor fields to rte mbuf flags */
- pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+ vlan_flags);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
((uint16_t)pkt_info[j]);
@@ -1472,6 +1558,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
int i, j;
+
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
@@ -1523,6 +1610,7 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_rx = 0;
while (nb_pkts) {
uint16_t ret, n;
+
n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
nb_rx = (uint16_t)(nb_rx + ret);
@@ -1554,6 +1642,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_rx;
uint16_t nb_hold;
uint64_t pkt_flags;
+ uint64_t vlan_flags;
nb_rx = 0;
nb_hold = 0;
@@ -1561,6 +1650,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
+ vlan_flags = rxq->vlan_flags;
while (nb_rx < nb_pkts) {
/*
* The order of operations here is important as the DD status
@@ -1608,7 +1698,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
(unsigned) rx_id, (unsigned) staterr,
(unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
@@ -1670,7 +1760,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- pkt_flags = rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
pkt_flags = pkt_flags |
ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
@@ -1763,7 +1853,7 @@ ixgbe_fill_cluster_head_buf(
*/
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
- pkt_flags = rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
head->ol_flags = pkt_flags;
@@ -1879,7 +1969,7 @@ next_desc:
rte_le_to_cpu_16(rxd.wb.upper.length));
if (!bulk_alloc) {
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
"port_id=%u queue_id=%u",
@@ -1889,8 +1979,7 @@ next_desc:
rx_mbuf_alloc_failed++;
break;
}
- }
- else if (nb_hold > rxq->rx_free_thresh) {
+ } else if (nb_hold > rxq->rx_free_thresh) {
uint16_t next_rdt = rxq->rx_free_trigger;
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
@@ -2151,6 +2240,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
@@ -2170,7 +2260,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
txq->ctx_curr = 0;
- memset((void*)&txq->ctx_cache, 0,
+ memset((void *)&txq->ctx_cache, 0,
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
@@ -2443,6 +2533,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
+
mb = rxq->rx_stage[rxq->rx_next_avail + i];
rte_pktmbuf_free_seg(mb);
}
@@ -2665,7 +2756,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Zero init all the descriptors in the ring.
*/
- memset (rz->addr, 0, RX_RING_SZ);
+ memset(rz->addr, 0, RX_RING_SZ);
/*
* Modified to setup VFRDT for Virtual Function
@@ -2679,8 +2770,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
rxq->rdh_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
- }
- else {
+ } else {
rxq->rdt_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
rxq->rdh_reg_addr =
@@ -2816,6 +2906,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
@@ -2824,6 +2915,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(adapter, rxq);
@@ -3139,6 +3231,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
}
for (i = 0; i < nb_tcs; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
@@ -3147,14 +3240,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
/* zero alloc all unused TCs */
for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
+
+ rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
/* MRQC: enable vmdq and dcb */
- mrqc = ((num_pools == ETH_16_POOLS) ? \
- IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
+ mrqc = (num_pools == ETH_16_POOLS) ?
+ IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* PFVTCTL: turn on virtualisation and set the default pool */
@@ -3192,7 +3286,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
}
/* VFRE: pool enabling for receive - 16 or 32 */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*
@@ -3205,7 +3299,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
(cfg->pool_map[i].vlan_id & 0xFFF)));
/*
* Put the allowed pools in VFB reg. As we only have 16 or 32
@@ -3223,7 +3317,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
*/
static void
ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t q;
@@ -3238,18 +3332,17 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
/* Enable DCB for Tx with 8 TCs */
if (dcb_config->num_tcs.pg_tcs == 8) {
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- }
- else {
+ } else {
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
}
if (dcb_config->vt_mode)
- reg |= IXGBE_MTQC_VT_ENA;
+ reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
/* Disable drop for all queues */
for (q = 0; q < 128; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@@ -3261,7 +3354,6 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
reg |= IXGBE_SECTX_DCB;
IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
}
- return;
}
/**
@@ -3285,25 +3377,23 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*Configure general DCB TX parameters*/
- ixgbe_dcb_tx_hw_config(hw,dcb_config);
- return;
+ ixgbe_dcb_tx_hw_config(hw, dcb_config);
}
static void
ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
- if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
- }
- else {
+ } else {
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
@@ -3318,19 +3408,18 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
static void
ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
- if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
- }
- else {
+ } else {
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
@@ -3342,7 +3431,6 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
}
- return;
}
static void
@@ -3352,7 +3440,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
@@ -3373,7 +3461,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
struct rte_eth_dcb_tx_conf *tx_conf =
&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
@@ -3394,7 +3482,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
*/
static void
ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
@@ -3454,13 +3542,11 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
- return;
}
static void
ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
- uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -3485,16 +3571,16 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
- ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
- ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
break;
default:
break;
@@ -3515,7 +3601,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
int ret = 0;
- uint8_t i,pfc_en,nb_tcs;
+ uint8_t i, pfc_en, nb_tcs;
uint16_t pbsize, rx_buffer_size;
uint8_t config_dcb_rx = 0;
uint8_t config_dcb_tx = 0;
@@ -3529,7 +3615,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- switch(dev->data->dev_conf.rxmode.mq_mode){
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
dcb_config->vt_mode = true;
if (hw->mac.type != ixgbe_mac_82598EB) {
@@ -3560,10 +3646,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
case ETH_MQ_TX_VMDQ_DCB:
dcb_config->vt_mode = true;
config_dcb_tx = DCB_TX_CONFIG;
- /* get DCB and VT TX configuration parameters from rte_eth_conf */
- ixgbe_dcb_vt_tx_config(dev,dcb_config);
+ /* get DCB and VT TX configuration parameters
+ * from rte_eth_conf
+ */
+ ixgbe_dcb_vt_tx_config(dev, dcb_config);
/*Configure general VMDQ and DCB TX parameters*/
- ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
+ ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
break;
case ETH_MQ_TX_DCB:
@@ -3586,8 +3674,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Avoid un-configured priority mapping to TC0 */
uint8_t j = 4;
uint8_t mask = 0xFF;
+
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
- mask = (uint8_t)(mask & (~ (1 << map[i])));
+ mask = (uint8_t)(mask & (~(1 << map[i])));
for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
map[j++] = i;
@@ -3623,6 +3712,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Set RX buffer size */
pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
+
for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
@@ -3632,9 +3722,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
}
}
if (config_dcb_tx) {
- /* Only support an equally distributed Tx packet buffer strategy. */
+ /* Only support an equally distributed
+ * Tx packet buffer strategy.
+ */
uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
+
for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
@@ -3647,9 +3740,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
}
/*Calculates traffic class credits*/
- ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_RX_CONFIG);
if (config_dcb_rx) {
@@ -3659,7 +3752,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
/* Configure PG(ETS) RX */
- ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+ ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
}
if (config_dcb_tx) {
@@ -3669,7 +3762,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
/* Configure PG(ETS) TX */
- ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
+ ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
}
/*Configure queue statistics registers*/
@@ -3683,7 +3776,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
* If the TC count is 8,and the default high_water is 48,
* the low_water is 16 as default.
*/
- hw->fc.high_water[i] = (pbsize * 3 ) / 4;
+ hw->fc.high_water[i] = (pbsize * 3) / 4;
hw->fc.low_water[i] = pbsize / 4;
/* Enable pfc for this TC */
tc = &dcb_config->tc_config[i];
@@ -3721,8 +3814,6 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
/** Configure DCB hardware **/
ixgbe_dcb_hw_configure(dev, dcb_cfg);
-
- return;
}
/*
@@ -3787,7 +3878,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
(cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
/*
* Put the allowed pools in VFB reg. As we only have 16 or 64
@@ -3795,12 +3886,11 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
* i.e. bits 0-31
*/
if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
(cfg->pool_map[i].pools & UINT32_MAX));
else
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
- ((cfg->pool_map[i].pools >> 32) \
- & UINT32_MAX));
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
+ ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
}
@@ -3840,7 +3930,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
/* Disable drop for all queues */
for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@@ -3848,8 +3938,6 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
IXGBE_WRITE_FLUSH(hw);
-
- return;
}
static int __attribute__((cold))
@@ -3857,12 +3945,13 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
- unsigned i;
+ unsigned int i;
/* Initialize software ring entries */
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile union ixgbe_adv_rx_desc *rxd;
- struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned) rxq->queue_id);
@@ -4253,6 +4342,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
rxq->rx_using_sse = rx_using_sse;
}
}
@@ -4305,6 +4395,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RFCTL configuration */
if (rsc_capable) {
uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+
if (rx_conf->enable_lro)
/*
* Since NFS packets coalescing is not supported - clear
@@ -4498,6 +4589,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
uint32_t psrtype;
+
psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
@@ -4597,7 +4689,8 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Enable TX CRC (checksum offload requirement) and hw padding
- * (TSO requirement) */
+ * (TSO requirement)
+ */
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@@ -4622,26 +4715,26 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
* bookkeeping if things aren't delivered in order.
*/
switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- txctrl = IXGBE_READ_REG(hw,
- IXGBE_DCA_TXCTRL(txq->reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
- txctrl);
- break;
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_DCA_TXCTRL(txq->reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
+ txctrl);
+ break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_X550EM_a:
- default:
- txctrl = IXGBE_READ_REG(hw,
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ default:
+ txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
- txctrl);
- break;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
+ txctrl);
+ break;
}
}
@@ -4813,12 +4906,12 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
- /* Wait until RX Enable ready */
+ /* Wait until the RX Enable bit clears */
poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
do {
rte_delay_ms(1);
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
+ } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
rx_queue_id);
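
The poll-loop hunk above fixes a genuine bug rather than just style: `rxdctl | IXGBE_RXDCTL_ENABLE` is non-zero for any register value, so the old loop always spun until the poll budget expired. A self-contained sketch of the corrected poll-until-clear pattern, with a simulated register in place of the MMIO read:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RXDCTL_ENABLE 0x02000000u /* illustrative bit position */

/* Simulated register: the enable bit clears after a few reads. */
static uint32_t read_rxdctl(void)
{
	static int reads;

	return ++reads < 3 ? RXDCTL_ENABLE : 0;
}

static bool wait_queue_disabled(unsigned int poll_budget)
{
	uint32_t v;

	do {
		/* a real driver would rte_delay_ms(1) here */
		v = read_rxdctl();
	} while (--poll_budget && (v & RXDCTL_ENABLE)); /* '&', not '|' */
	return poll_budget != 0;
}

int main(void)
{
	printf("disabled: %s\n", wait_queue_disabled(10) ? "yes" : "timeout");
	return 0;
}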
@@ -4892,48 +4985,48 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -1;
- /* Wait until TX queue is empty */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_us(RTE_IXGBE_WAIT_100_US);
- txtdh = IXGBE_READ_REG(hw,
- IXGBE_TDH(txq->reg_idx));
- txtdt = IXGBE_READ_REG(hw,
- IXGBE_TDT(txq->reg_idx));
- } while (--poll_ms && (txtdh != txtdt));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.", tx_queue_id);
- }
+ txq = dev->data->tx_queues[tx_queue_id];
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
- txdctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ /* Wait until TX queue is empty */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ txtdh = IXGBE_READ_REG(hw,
+ IXGBE_TDH(txq->reg_idx));
+ txtdt = IXGBE_READ_REG(hw,
+ IXGBE_TDT(txq->reg_idx));
+ } while (--poll_ms && (txtdh != txtdt));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+ "when stopping.", tx_queue_id);
+ }
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw,
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+ /* Wait until the TX Enable bit clears */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
IXGBE_TXDCTL(txq->reg_idx));
- } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d", tx_queue_id);
- }
+ } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable "
+ "Tx Queue %d", tx_queue_id);
+ }
- if (txq->ops != NULL) {
- txq->ops->release_mbufs(txq);
- txq->ops->reset(txq);
- }
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- } else
- return -1;
+ if (txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 3691a19d..2608b364 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -146,6 +146,8 @@ struct ixgbe_rx_queue {
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint8_t rx_deferred_start; /**< not in global dev start. */
+ /** flags to set in mbuf when a vlan is detected. */
+ uint64_t vlan_flags;
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
new file mode 100644
index 00000000..62b82013
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -0,0 +1,326 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_RXTX_VEC_COMMON_H_
+#define _IXGBE_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned int pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len)
+ end->data_len -= rxq->crc_len;
+ else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
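
One subtlety in reassemble_packets() above: when the hardware CRC occupies more bytes than the final segment holds, the tail mbuf is freed and the leftover CRC bytes are trimmed from the second-to-last segment. A tiny sketch of just that arithmetic, with illustrative lengths:

#include <stdio.h>

int main(void)
{
	/* Illustrative lengths: a two-segment packet whose last segment is
	 * shorter than the 4-byte CRC being stripped. */
	unsigned int crc_len = 4;
	unsigned int secondlast_len = 60, end_len = 2;

	if (end_len > crc_len) {
		end_len -= crc_len;
	} else {
		/* Last segment holds only CRC bytes: the driver frees it and
		 * trims the rest of the CRC from the previous segment. */
		secondlast_len -= crc_len - end_len;
		end_len = 0; /* segment would be freed via rte_pktmbuf_free_seg() */
	}
	printf("secondlast=%u end=%u\n", secondlast_len, end_len);
	return 0;
}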
+
+static inline int __attribute__((always_inline))
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+{
+ struct ixgbe_tx_entry_v *txep;
+ uint32_t status;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (!(status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
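
ixgbe_tx_free_bufs() batches completed mbufs per mempool so it can return them with one bulk put instead of one call each. A stand-alone sketch of the same grouping logic, using toy pool/mbuf types and a hypothetical put_bulk() in place of rte_mempool_put_bulk():

#include <stdio.h>

/* Toy stand-ins: each "mbuf" records which pool it came from. */
struct pool { const char *name; };
struct mbuf { struct pool *pool; };

/* Hypothetical bulk-put; the driver calls rte_mempool_put_bulk(). */
static void put_bulk(struct pool *p, struct mbuf **objs, int n)
{
	printf("returning %d bufs to pool %s\n", n, p->name);
	(void)objs;
}

int main(void)
{
	struct pool a = { "a" }, b = { "b" };
	struct mbuf bufs[5] = { {&a}, {&a}, {&b}, {&b}, {&a} };
	struct mbuf *free[5];
	int nb_free = 0, i;

	/* Batch consecutive same-pool buffers, flushing when the pool changes. */
	free[nb_free++] = &bufs[0];
	for (i = 1; i < 5; i++) {
		if (bufs[i].pool == free[0]->pool) {
			free[nb_free++] = &bufs[i];
		} else {
			put_bulk(free[0]->pool, free, nb_free);
			free[0] = &bufs[i];
			nb_free = 1;
		}
	}
	put_bulk(free[0]->pool, free, nb_free);
	return 0;
}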
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+{
+ unsigned int i;
+ struct ixgbe_tx_entry_v *txe;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+
+ if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
+ return;
+
+ /* release the used mbufs in sw_ring */
+ for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+ i != txq->tx_tail;
+ i = (i + 1) & max_desc) {
+ txe = &txq->sw_ring_v[i];
+ rte_pktmbuf_free_seg(txe->mbuf);
+ }
+ txq->nb_tx_free = max_desc;
+
+ /* reset tx_entry */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txe = &txq->sw_ring_v[i];
+ txe->mbuf = NULL;
+ }
+}
+
+static inline void
+_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline void
+_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
+{
+ if (txq == NULL)
+ return;
+
+ if (txq->sw_ring != NULL) {
+ rte_free(txq->sw_ring_v - 1);
+ txq->sw_ring_v = NULL;
+ }
+}
+
+static inline void
+_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
+{
+ static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
+ struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++)
+ txq->tx_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->wb.status = IXGBE_TXD_STAT_DD;
+ txe[i].mbuf = NULL;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static inline int
+ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
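
ixgbe_rxq_vec_setup_default() snapshots the template mbuf's rearm_data as a single uint64_t so the hot RX path can re-initialize every freshly allocated mbuf with one 8-byte store. A compilable sketch of the idea with a toy struct; the field layout is illustrative, not the real rte_mbuf layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy layout: 8 bytes of hot fields packed together so one 8-byte copy
 * re-initializes them all. Names are illustrative only. */
struct toy_mbuf {
	void *buf_addr;
	uint16_t data_off; /* start of the 8-byte "rearm" region */
	uint16_t refcnt;
	uint8_t nb_segs;
	uint8_t port;
	uint16_t pad;
};

int main(void)
{
	struct toy_mbuf tmpl = { 0 }, m = { 0 };
	uint64_t initializer;

	tmpl.data_off = 128;
	tmpl.refcnt = 1;
	tmpl.nb_segs = 1;
	tmpl.port = 3;

	/* Snapshot the packed fields once at setup time... */
	memcpy(&initializer, &tmpl.data_off, sizeof(initializer));

	/* ...then rearm each mbuf with a single 8-byte store. */
	memcpy(&m.data_off, &initializer, sizeof(initializer));
	printf("data_off=%u refcnt=%u nb_segs=%u port=%u\n",
	       (unsigned int)m.data_off, (unsigned int)m.refcnt,
	       (unsigned int)m.nb_segs, (unsigned int)m.port);
	return 0;
}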
+
+static inline int
+ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
+ const struct ixgbe_txq_ops *txq_ops)
+{
+ if (txq->sw_ring_v == NULL)
+ return -1;
+
+ /* leave the first one for overflow */
+ txq->sw_ring_v = txq->sw_ring_v + 1;
+ txq->ops = txq_ops;
+
+ return 0;
+}
+
+static inline int
+ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
+ /* without rx ol_flags, no VP flag report */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /*
+ * - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+#endif
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
new file mode 100644
index 00000000..64a329ea
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -0,0 +1,560 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "ixgbe_rxtx_vec_common.h"
+
+#include <arm_neon.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ uint64x2_t dma_addr0, dma_addr1;
+ uint64x2_t zero = vdupq_n_u64(0);
+ uint64_t paddr;
+ uint8x8_t p;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (unlikely(rte_mempool_get_bulk(rxq->mb_pool,
+ (void *)rxep,
+ RTE_IXGBE_RXQ_REARM_THRESH) < 0)) {
+ if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vst1q_u64((uint64_t *)&rxdp[i].read,
+ zero);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_IXGBE_RXQ_REARM_THRESH;
+ return;
+ }
+
+ p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /*
+ * Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * However, RX will overwrite the ol_flags that come next
+ * anyway, so overwrite the whole 8 bytes with a single store:
+ * 6 bytes of rearm_data plus the first 2 bytes of ol_flags.
+ */
+ vst1_u8((uint8_t *)&mb0->rearm_data, p);
+ paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr0 = vsetq_lane_u64(paddr, zero, 0);
+ /* flush desc with pa dma_addr */
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
+
+ vst1_u8((uint8_t *)&mb1->rearm_data, p);
+ paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr1 = vsetq_lane_u64(paddr, zero, 0);
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when it is not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet.
+ */
+#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
+
+#define VTAG_SHIFT (3)
+
+static inline void
+desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
+ uint8x16_t staterr, struct rte_mbuf **rx_pkts)
+{
+ uint8x16_t ptype;
+ uint8x16_t vtag;
+
+ union {
+ uint8_t e[4];
+ uint32_t word;
+ } vol;
+
+ const uint8x16_t pkttype_msk = {
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+
+ const uint8x16_t rsstype_msk = {
+ 0x0F, 0x0F, 0x0F, 0x0F,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+
+ const uint8x16_t rss_flags = {
+ 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, PKT_RX_FDIR};
+
+ ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0];
+ ptype = vandq_u8(ptype, rsstype_msk);
+ ptype = vqtbl1q_u8(rss_flags, ptype);
+
+ vtag = vshrq_n_u8(staterr, VTAG_SHIFT);
+ vtag = vandq_u8(vtag, pkttype_msk);
+ vtag = vorrq_u8(ptype, vtag);
+
+ vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+#else
+#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts)
+#endif
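
desc_to_olflags_v() above maps descriptor status bits to mbuf ol_flags with a 16-entry table lookup (vqtbl1q_u8) plus a shift for the VLAN-present bit. A scalar version of the same mapping for one packet; the flag values here are illustrative, not DPDK's real ol_flags bits:

#include <stdint.h>
#include <stdio.h>

#define RSS_HASH_FLAG 0x02u /* illustrative, not PKT_RX_RSS_HASH */
#define FDIR_FLAG     0x04u /* illustrative, not PKT_RX_FDIR */
#define VLAN_FLAG     0x01u /* illustrative, not PKT_RX_VLAN_PKT */

int main(void)
{
	/* The low 4 RSS-type bits select an ol_flags byte; this is what
	 * vqtbl1q_u8 (or pshufb on x86) does for 16 packets at once. */
	static const uint8_t rss_flags[16] = {
		0, RSS_HASH_FLAG, RSS_HASH_FLAG, RSS_HASH_FLAG,
		0, RSS_HASH_FLAG, 0, RSS_HASH_FLAG,
		RSS_HASH_FLAG, 0, 0, 0,
		0, 0, 0, FDIR_FLAG
	};
	uint8_t rss_type = 0x2; /* from the descriptor */
	uint8_t staterr = 0x08; /* bit 3 = VLAN present (VP) */
	uint8_t ol = rss_flags[rss_type & 0x0F];

	ol |= (staterr >> 3) & VLAN_FLAG; /* VTAG_SHIFT moves VP to bit 0 */
	printf("ol_flags byte = 0x%02x\n", (unsigned int)ol);
	return 0;
}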
+
+/*
+ * vPMD raw receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
+ *
+ * Notice:
+ * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_IXGBE_MAX_RX_BURST, only RTE_IXGBE_MAX_RX_BURST
+ *   descriptors are scanned for DD bits
+ * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
+ * - RSS and checksum-error ol_flags are not supported
+ */
+
+#define IXGBE_VPMD_DESC_DD_MASK 0x01010101
+#define IXGBE_VPMD_DESC_EOP_MASK 0x02020202
+
+static inline uint16_t
+_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ struct ixgbe_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ uint8x16_t shuf_msk = {
+ 0xFF, 0xFF,
+ 0xFF, 0xFF, /* skip 32 bits pkt_type */
+ 12, 13, /* octet 12~13, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 12, 13, /* octet 12~13, 16 bits data_len */
+ 14, 15, /* octet 14~15, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+ uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
+ rxq->crc_len, 0, 0, 0};
+
+ /* nb_pkts must not exceed RTE_IXGBE_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch_non_temporal(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
+ ixgbe_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ return 0;
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packets per loop iteration
+ * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+ * C. count the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill mbuf fields from the descriptors
+ */
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_IXGBE_DESCS_PER_LOOP,
+ rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
+ uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP];
+ uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ uint8x16x2_t sterr_tmp1, sterr_tmp2;
+ uint64x2_t mbp1, mbp2;
+ uint8x16_t staterr;
+ uint16x8_t tmp;
+ uint32_t stat;
+
+ /* B.1 load 2 mbuf pointers */
+ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+ rte_rmb();
+
+ /* B.2 copy 2 mbuf pointers into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 2 mbuf pointers */
+ mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+ descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+ /* A.1 load 2 pkts descs */
+ descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ descs[0] = vld1q_u64((uint64_t *)(rxdp));
+
+ /* B.2 copy 2 mbuf pointers into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+ pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+ pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]),
+ vreinterpretq_u8_u64(descs[3]));
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]),
+ vreinterpretq_u8_u64(descs[2]));
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0];
+ stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0);
+
+ /* set ol_flags with vlan packet type */
+ desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr,
+ &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+ pkt_mb4 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+ pkt_mb3 = vreinterpretq_u8_u16(tmp);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+ pkt_mb2 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+ pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ /* and with mask to extract bits, flipping 1-0 */
+ *(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;
+
+ split_packet += RTE_IXGBE_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+
+ /* C.4 calc available number of desc */
+ var = __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK);
+ nb_pkts_recd += var;
+ if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
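
The DD accounting at the end of the loop above packs one status byte per descriptor into a 32-bit word, masks the DD bits, and counts them with a popcount; fewer than four set bits means the hardware ran out of completed descriptors and the loop stops. In miniature:

#include <stdint.h>
#include <stdio.h>

#define DESC_DD_MASK 0x01010101u /* one DD bit per packed status byte */

int main(void)
{
	/* Packed status of 4 descriptors: only the first two are done. */
	uint32_t stat = 0x00000101u;
	unsigned int done = __builtin_popcount(stat & DESC_DD_MASK);

	printf("%u of 4 descriptors complete\n", done);
	return 0;
}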
+
+/*
+ * vPMD receive routine; only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP.
+ *
+ * Notice:
+ * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_IXGBE_MAX_RX_BURST, only RTE_IXGBE_MAX_RX_BURST
+ *   descriptors are scanned for DD bits
+ * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
+ * - RSS and checksum-error ol_flags are not supported
+ */
+uint16_t
+ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/*
+ * vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - RSS and checksum-error ol_flags are not supported
+ * - if nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_IXGBE_MAX_RX_BURST, only RTE_IXGBE_MAX_RX_BURST
+ *   descriptors are scanned for DD bits
+ * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ unsigned int i = 0;
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
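
The "happy day" test above reads the 32 per-buffer split flags as four 64-bit words, so a full burst with no split packets is detected in four compares. The same trick, sketched with memcpy instead of the driver's pointer cast (which relies on the compiler tolerating the aliasing):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t split_flags[32] = { 0 }; /* one flag byte per buffer */
	uint64_t w[4];

	split_flags[19] = 1; /* pretend buffer 19 ends mid-packet */

	/* Test 8 flags per compare: all-zero words mean no reassembly. */
	memcpy(w, split_flags, sizeof(w));
	if ((w[0] | w[1] | w[2] | w[3]) == 0)
		printf("fast path: no split packets\n");
	else
		printf("slow path: reassembly needed\n");
	return 0;
}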
+
+static inline void
+vtx1(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64x2_t descriptor = {
+ pkt->buf_physaddr + pkt->data_off,
+ (uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len};
+
+ vst1q_u64((uint64_t *)&txdp->read, descriptor);
+}
+
+static inline void
+vtx(volatile union ixgbe_adv_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+ volatile union ixgbe_adv_tx_desc *txdp;
+ struct ixgbe_tx_entry_v *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = DCMD_DTYP_FLAGS;
+ uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS;
+ int i;
+
+ /* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ixgbe_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+ txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
+ txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
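
ixgbe_xmit_pkts_vec() commits descriptors in at most two spans: from tail to the end of the ring, then from slot zero after the wrap. The same two-span copy in isolation, on a toy 8-entry ring:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	int ring[RING_SIZE] = { 0 };
	int pkts[5] = { 1, 2, 3, 4, 5 };
	uint16_t tail = 6, nb_commit = 5, copied = 0, n, i;

	/* First span: from tail up to the end of the ring. */
	n = RING_SIZE - tail;
	if (nb_commit >= n) {
		for (i = 0; i < n; i++)
			ring[tail + i] = pkts[i];
		copied = n;
		nb_commit -= n;
		tail = 0; /* wrap to the start */
	}
	/* Second span: the remainder from slot zero. */
	for (i = 0; i < nb_commit; i++)
		ring[tail + i] = pkts[copied + i];
	tail += nb_commit;

	printf("new tail=%u\n", (unsigned int)tail);
	return 0;
}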
+
+static void __attribute__((cold))
+ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_tx_queue_release_mbufs_vec(txq);
+}
+
+void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ _ixgbe_rx_queue_release_mbufs_vec(rxq);
+}
+
+static void __attribute__((cold))
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_tx_free_swring_vec(txq);
+}
+
+static void __attribute__((cold))
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+{
+ _ixgbe_reset_tx_queue_vec(txq);
+}
+
+static const struct ixgbe_txq_ops vec_txq_ops = {
+ .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
+ .free_swring = ixgbe_tx_free_swring,
+ .reset = ixgbe_reset_tx_queue,
+};
+
+int __attribute__((cold))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
+{
+ return ixgbe_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+{
+ return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
+}
+
+int __attribute__((cold))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 50407043..4f95debd 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -37,6 +37,7 @@
#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
+#include "ixgbe_rxtx_vec_common.h"
#include <tmmintrin.h>
@@ -140,10 +141,9 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
*/
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-#define VTAG_SHIFT (3)
-
static inline void
-desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
+ struct rte_mbuf **rx_pkts)
{
__m128i ptype0, ptype1, vtag0, vtag1;
union {
@@ -151,11 +151,6 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
uint64_t dword;
} vol;
- /* pkt type + vlan olflags mask */
- const __m128i pkttype_msk = _mm_set_epi16(
- 0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
-
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
@@ -167,6 +162,19 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+ /* mask everything except vlan present bit */
+ const __m128i vlan_msk = _mm_set_epi16(
+ 0x0000, 0x0000,
+ 0x0000, 0x0000,
+ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
+ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
+ /* map vlan present (0x8) to ol_flags */
+ const __m128i vlan_map = _mm_set_epi8(
+ 0, 0, 0, 0,
+ 0, 0, 0, vlan_flags,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0);
+
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
@@ -177,8 +185,8 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
- vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);
- vtag1 = _mm_and_si128(vtag1, pkttype_msk);
+ vtag1 = _mm_and_si128(vtag1, vlan_msk);
+ vtag1 = _mm_shuffle_epi8(vlan_map, vtag1);
vtag1 = _mm_or_si128(ptype0, vtag1);
vol.dword = _mm_cvtsi128_si64(vtag1);
@@ -220,6 +228,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0, 0 /* ignore pkt_type field */
);
__m128i dd_check, eop_check;
+ uint8_t vlan_flags;
/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
@@ -228,18 +237,21 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
/* Just the act of getting into the function from the application is
- * going to cost about 7 cycles */
+ * going to cost about 7 cycles
+ */
rxdp = rxq->rx_ring + rxq->rx_tail;
_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
/* See if we need to rearm the RX queue - gives the prefetch a bit
- * of time to act */
+ * of time to act
+ */
if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
ixgbe_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
- * there is actually a packet available */
+ * there is actually a packet available
+ */
if (!(rxdp->wb.upper.status_error &
rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
@@ -262,9 +274,14 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
);
/* Cache is empty -> need to scan the buffer rings, but first move
- * the next 'n' mbufs into the cache */
+ * the next 'n' mbufs into the cache
+ */
sw_ring = &rxq->sw_ring[rxq->rx_tail];
+ /* ensure these 2 flags are in the lower 8 bits */
+ RTE_BUILD_BUG_ON((PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) > UINT8_MAX);
+ vlan_flags = rxq->vlan_flags & UINT8_MAX;
+
/* A. load 4 packet in one loop
* [A*. mask out 4 unused dirty field in desc]
* B. copy 4 mbuf point from swring to rx_pkts
@@ -302,10 +319,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
if (split_packet) {
- rte_prefetch0(&rx_pkts[pos]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
}
/* avoid compiler reorder optimization */
@@ -325,7 +342,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
/* set ol_flags with vlan packet type */
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -359,7 +376,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* the staterr values are not in order, as the count
* of dd bits doesn't care. However, for end of
* packet tracking, we do care, so shuffle. This also
- * compresses the 32-bit values to 8-bit */
+ * compresses the 32-bit values to 8-bit
+ */
eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
@@ -414,69 +432,6 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
-static inline uint16_t
-reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
- uint16_t nb_bufs, uint8_t *split_flags)
-{
- struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
- struct rte_mbuf *start = rxq->pkt_first_seg;
- struct rte_mbuf *end = rxq->pkt_last_seg;
- unsigned pkt_idx, buf_idx;
-
- for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
- if (end != NULL) {
- /* processing a split packet */
- end->next = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
-
- start->nb_segs++;
- start->pkt_len += rx_bufs[buf_idx]->data_len;
- end = end->next;
-
- if (!split_flags[buf_idx]) {
- /* it's the last packet of the set */
- start->hash = end->hash;
- start->ol_flags = end->ol_flags;
- /* we need to strip crc for the whole packet */
- start->pkt_len -= rxq->crc_len;
- if (end->data_len > rxq->crc_len)
- end->data_len -= rxq->crc_len;
- else {
- /* free up last mbuf */
- struct rte_mbuf *secondlast = start;
-
- start->nb_segs--;
- while (secondlast->next != end)
- secondlast = secondlast->next;
- secondlast->data_len -= (rxq->crc_len -
- end->data_len);
- secondlast->next = NULL;
- rte_pktmbuf_free_seg(end);
- end = secondlast;
- }
- pkts[pkt_idx++] = start;
- start = end = NULL;
- }
- } else {
- /* not processing a split packet */
- if (!split_flags[buf_idx]) {
- /* not a split packet, save and skip */
- pkts[pkt_idx++] = rx_bufs[buf_idx];
- continue;
- }
- end = start = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
- rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
- }
- }
-
- /* save the partial packet for next time */
- rxq->pkt_first_seg = start;
- rxq->pkt_last_seg = end;
- memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
- return pkt_idx;
-}
-
/*
* vPMD receive routine that reassembles scattered packets
*
@@ -535,76 +490,11 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
int i;
+
for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
vtx1(txdp, *pkt, flags);
}
-static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
-{
- struct ixgbe_tx_entry_v *txep;
- uint32_t status;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
-
- /* check DD bit on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
- if (!(status & IXGBE_ADVTXD_STAT_DD))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /*
- * first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool))
- free[nb_free++] = m;
- else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free, nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
-static inline void __attribute__((always_inline))
-tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- int i;
- for (i = 0; i < (int)nb_pkts; ++i)
- txep[i].mbuf = tx_pkts[i];
-}
-
uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -675,91 +565,25 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
- unsigned i;
- struct ixgbe_tx_entry_v *txe;
- const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-
- if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
- return;
-
- /* release the used mbufs in sw_ring */
- for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
- i != txq->tx_tail;
- i = (i + 1) & max_desc) {
- txe = &txq->sw_ring_v[i];
- rte_pktmbuf_free_seg(txe->mbuf);
- }
- txq->nb_tx_free = max_desc;
-
- /* reset tx_entry */
- for (i = 0; i < txq->nb_tx_desc; i++) {
- txe = &txq->sw_ring_v[i];
- txe->mbuf = NULL;
- }
+ _ixgbe_tx_queue_release_mbufs_vec(txq);
}
void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
- const unsigned mask = rxq->nb_rx_desc - 1;
- unsigned i;
-
- if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
- return;
-
- /* free all mbufs that are valid in the ring */
- for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
- rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
- rxq->rxrearm_nb = rxq->nb_rx_desc;
-
- /* set all entries to NULL */
- memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+ _ixgbe_rx_queue_release_mbufs_vec(rxq);
}
static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
- if (txq == NULL)
- return;
-
- if (txq->sw_ring != NULL) {
- rte_free(txq->sw_ring_v - 1);
- txq->sw_ring_v = NULL;
- }
+ _ixgbe_tx_free_swring_vec(txq);
}
static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
- static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
- struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
- uint16_t i;
-
- /* Zero out HW ring memory */
- for (i = 0; i < txq->nb_tx_desc; i++)
- txq->tx_ring[i] = zeroed_desc;
-
- /* Initialize SW ring entries */
- for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
- txd->wb.status = IXGBE_TXD_STAT_DD;
- txe[i].mbuf = NULL;
- }
-
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
- txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
- txq->tx_tail = 0;
- txq->nb_tx_used = 0;
- /*
- * Always allow 1 descriptor to be un-allocated to avoid
- * a H/W race condition
- */
- txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
- txq->ctx_curr = 0;
- memset((void *)&txq->ctx_cache, 0,
- IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+ _ixgbe_reset_tx_queue_vec(txq);
}
static const struct ixgbe_txq_ops vec_txq_ops = {
@@ -771,63 +595,17 @@ static const struct ixgbe_txq_ops vec_txq_ops = {
int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
- uintptr_t p;
- struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
- mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
- rte_mbuf_refcnt_set(&mb_def, 1);
-
- /* prevent compiler reordering: rearm_data covers previous fields */
- rte_compiler_barrier();
- p = (uintptr_t)&mb_def.rearm_data;
- rxq->mbuf_initializer = *(uint64_t *)p;
- return 0;
+ return ixgbe_rxq_vec_setup_default(rxq);
}
int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
- if (txq->sw_ring_v == NULL)
- return -1;
-
- /* leave the first one for overflow */
- txq->sw_ring_v = txq->sw_ring_v + 1;
- txq->ops = &vec_txq_ops;
-
- return 0;
+ return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-
-#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0)
- return -1;
-#endif
-
- /* no fdir support */
- if (fconf->mode != RTE_FDIR_MODE_NONE)
- return -1;
-
- /*
- * - no csum error report support
- * - no header split support
- */
- if (rxmode->hw_ip_checksum == 1 ||
- rxmode->header_split == 1)
- return -1;
-
- return 0;
-#else
- RTE_SET_USED(dev);
- return -1;
-#endif
+ return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index d2f56927..efed953e 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -48,6 +48,8 @@ CFLAGS += -O3
CFLAGS += -std=gnu99 -Wall -Wextra
CFLAGS += -g
CFLAGS += -I.
+CFLAGS += -D_BSD_SOURCE
+CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -libverbs
@@ -98,7 +100,9 @@ ifndef V
AUTOCONF_OUTPUT := >/dev/null
endif
-mlx4_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh
+mlx4_autoconf.h.new: FORCE
+
+mlx4_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
RSS_SUPPORT \
@@ -118,9 +122,16 @@ mlx4_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh
enum IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK \
$(AUTOCONF_OUTPUT)
+# Create mlx4_autoconf.h or update it in case it differs from the new one.
+
+mlx4_autoconf.h: mlx4_autoconf.h.new
+ $Q [ -f '$@' ] && \
+ cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+ mv '$<' '$@'
+
mlx4.o: mlx4_autoconf.h
clean_mlx4: FORCE
- $Q rm -f -- mlx4_autoconf.h
+ $Q rm -f -- mlx4_autoconf.h mlx4_autoconf.h.new
clean: clean_mlx4
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 4f21dbe2..f8ed42b8 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -54,7 +54,6 @@
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
-#include <linux/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>
@@ -197,7 +196,6 @@ struct rxq {
unsigned int sp:1; /* Use scattered RX elements. */
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
- uint32_t mb_len; /* Length of a mp-issued mbuf. */
struct mlx4_rxq_stats stats; /* RX queue counters. */
unsigned int socket; /* CPU socket ID for allocations. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
@@ -659,7 +657,15 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu)
static int
priv_set_mtu(struct priv *priv, uint16_t mtu)
{
- return priv_set_sysfs_ulong(priv, "mtu", mtu);
+ uint16_t new_mtu;
+
+ if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
+ priv_get_mtu(priv, &new_mtu))
+ return -1;
+ if (new_mtu == mtu)
+ return 0;
+ errno = EINVAL;
+ return -1;
}
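
The new priv_set_mtu() adopts a set-then-verify pattern: write the sysfs value, read it back, and only report success if it actually stuck, since the kernel may silently clamp or ignore the request. The pattern in isolation, against a toy device that clamps oversized MTUs:

#include <errno.h>
#include <stdio.h>

/* Toy device that silently clamps MTU, like a kernel driver might. */
static unsigned int dev_mtu = 1500;

static int dev_write_mtu(unsigned int mtu)
{
	dev_mtu = mtu > 9000 ? 1500 : mtu; /* pretend clamp */
	return 0;
}

static int dev_read_mtu(unsigned int *mtu)
{
	*mtu = dev_mtu;
	return 0;
}

/* Set-then-verify: only report success if the value actually stuck. */
static int set_mtu_checked(unsigned int mtu)
{
	unsigned int new_mtu;

	if (dev_write_mtu(mtu) || dev_read_mtu(&new_mtu))
		return -1;
	if (new_mtu == mtu)
		return 0;
	errno = EINVAL;
	return -1;
}

int main(void)
{
	printf("set 9216: %s\n", set_mtu_checked(9216) ? "rejected" : "ok");
	printf("set 1500: %s\n", set_mtu_checked(1500) ? "rejected" : "ok");
	return 0;
}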
/**
@@ -998,7 +1004,7 @@ txq_alloc_elts(struct txq *txq, unsigned int elts_n)
}
mr_linear =
ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear),
- (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
+ IBV_ACCESS_LOCAL_WRITE);
if (mr_linear == NULL) {
ERROR("%p: unable to configure MR, ibv_reg_mr() failed",
(void *)txq);
@@ -1198,8 +1204,71 @@ txq_complete(struct txq *txq)
return 0;
}
+struct mlx4_check_mempool_data {
+ int ret;
+ char *start;
+ char *end;
+};
+
+/* Called by mlx4_check_mempool() when iterating the memory chunks. */
+static void mlx4_check_mempool_cb(struct rte_mempool *mp,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx)
+{
+ struct mlx4_check_mempool_data *data = opaque;
+
+ (void)mp;
+ (void)mem_idx;
+
+ /* It already failed, skip the next chunks. */
+ if (data->ret != 0)
+ return;
+ /* It is the first chunk. */
+ if (data->start == NULL && data->end == NULL) {
+ data->start = memhdr->addr;
+ data->end = data->start + memhdr->len;
+ return;
+ }
+ if (data->end == memhdr->addr) {
+ data->end += memhdr->len;
+ return;
+ }
+ if (data->start == (char *)memhdr->addr + memhdr->len) {
+ data->start -= memhdr->len;
+ return;
+ }
+ /* Error, mempool is not virtually contiguous. */
+ data->ret = -1;
+}
+
+/**
+ * Check if a mempool can be used: it must be virtually contiguous.
+ *
+ * @param[in] mp
+ * Pointer to memory pool.
+ * @param[out] start
+ * Pointer to the start address of the mempool virtual memory area
+ * @param[out] end
+ * Pointer to the end address of the mempool virtual memory area
+ *
+ * @return
+ * 0 on success (mempool is virtually contiguous), -1 on error.
+ */
+static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
+ uintptr_t *end)
+{
+ struct mlx4_check_mempool_data data;
+
+ memset(&data, 0, sizeof(data));
+ rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
+ *start = (uintptr_t)data.start;
+ *end = (uintptr_t)data.end;
+
+ return data.ret;
+}
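
mlx4_check_mempool() folds each memory chunk into a single [start, end) interval and flags any gap, since ibv_reg_mr() needs one virtually contiguous region. A stand-alone version of the same folding logic over a plain chunk array:

#include <stddef.h>
#include <stdio.h>

struct chunk { char *addr; size_t len; };

/* Same idea as mlx4_check_mempool_cb: grow [start, end) as long as each
 * chunk touches it; any gap means the pool is not virtually contiguous. */
static int check_contiguous(struct chunk *c, int n, char **start, char **end)
{
	int i;

	*start = *end = NULL;
	for (i = 0; i < n; i++) {
		if (*start == NULL) {
			*start = c[i].addr;
			*end = c[i].addr + c[i].len;
		} else if (c[i].addr == *end) {
			*end += c[i].len;
		} else if (c[i].addr + c[i].len == *start) {
			*start = c[i].addr;
		} else {
			return -1; /* gap: not contiguous */
		}
	}
	return 0;
}

int main(void)
{
	static char area[4096];
	struct chunk ok[2] = { { area, 1024 }, { area + 1024, 1024 } };
	struct chunk bad[2] = { { area, 1024 }, { area + 2048, 1024 } };
	char *s, *e;

	printf("ok: %d\n", check_contiguous(ok, 2, &s, &e));
	printf("bad: %d\n", check_contiguous(bad, 2, &s, &e));
	return 0;
}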
+
/* For best performance, this function should not be inlined. */
-static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, const struct rte_mempool *)
+static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
__attribute__((noinline));
/**
@@ -1214,15 +1283,21 @@ static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, const struct rte_mempool *)
* Memory region pointer, NULL in case of error.
*/
static struct ibv_mr *
-mlx4_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
+mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start = mp->elt_va_start;
- uintptr_t end = mp->elt_va_end;
+ uintptr_t start;
+ uintptr_t end;
unsigned int i;
+ if (mlx4_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return NULL;
+ }
+
DEBUG("mempool %p area start=%p end=%p size=%zu",
- (const void *)mp, (void *)start, (void *)end,
+ (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
/* Round start and end to page boundary if found in memory segments. */
for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
@@ -1236,12 +1311,12 @@ mlx4_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
end = RTE_ALIGN_CEIL(end, align);
}
DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (const void *)mp, (void *)start, (void *)end,
+ (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
return ibv_reg_mr(pd,
(void *)start,
end - start,
- IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
+ IBV_ACCESS_LOCAL_WRITE);
}
/**
@@ -1276,7 +1351,7 @@ txq_mb2mp(struct rte_mbuf *buf)
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static uint32_t
-txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
unsigned int i;
struct ibv_mr *mr;
@@ -1294,7 +1369,7 @@ txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
}
/* Add a new entry, register MR first. */
DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq, mp->name, (const void *)mp);
+ (void *)txq, mp->name, (void *)mp);
mr = mlx4_mp2mr(txq->priv->pd, mp);
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
@@ -1315,12 +1390,11 @@ txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
txq->mp2mr[i].mr = mr;
txq->mp2mr[i].lkey = mr->lkey;
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
+ (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
return txq->mp2mr[i].lkey;
}
struct txq_mp2mr_mbuf_check_data {
- const struct rte_mempool *mp;
int ret;
};
@@ -1328,34 +1402,26 @@ struct txq_mp2mr_mbuf_check_data {
* Callback function for rte_mempool_obj_iter() to check whether a given
* mempool object looks like a mbuf.
*
- * @param[in, out] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer
- * and return value.
- * @param[in] start
- * Object start address.
- * @param[in] end
- * Object end address.
+ * @param[in] mp
+ * The mempool pointer
+ * @param[in] arg
+ * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
+ * return value.
+ * @param[in] obj
+ * Object address.
* @param index
- * Unused.
- *
- * @return
- * Nonzero value when object is not a mbuf.
+ * Object index, unused.
*/
static void
-txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
- uint32_t index __rte_unused)
+txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
+ uint32_t index __rte_unused)
{
struct txq_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf =
- (void *)((uintptr_t)start + data->mp->header_size);
+ struct rte_mbuf *buf = obj;
- (void)index;
/* Check whether mbuf structure fits element size and whether mempool
* pointer is valid. */
- if (((uintptr_t)end >= (uintptr_t)(buf + 1)) &&
- (buf->pool == data->mp))
- data->ret = 0;
- else
+ if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
data->ret = -1;
}
@@ -1369,28 +1435,16 @@ txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
* Pointer to TX queue structure.
*/
static void
-txq_mp2mr_iter(const struct rte_mempool *mp, void *arg)
+txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
struct txq *txq = arg;
struct txq_mp2mr_mbuf_check_data data = {
- .mp = mp,
- .ret = -1,
+ .ret = 0,
};
- /* Discard empty mempools. */
- if (mp->size == 0)
- return;
/* Register mempool only if the first element looks like a mbuf. */
- rte_mempool_obj_iter((void *)mp->elt_va_start,
- 1,
- mp->header_size + mp->elt_size + mp->trailer_size,
- 1,
- mp->elt_pa,
- mp->pg_num,
- mp->pg_shift,
- txq_mp2mr_mbuf_check,
- &data);
- if (data.ret)
+ if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
+ data.ret == -1)
return;
txq_mp2mr(txq, mp);
}
@@ -3075,7 +3129,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* cacheline while allocating rep.
*/
rte_prefetch0(seg);
- rep = __rte_mbuf_raw_alloc(rxq->mp);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(rep == NULL)) {
/*
* Unable to allocate a replacement mbuf,
@@ -3104,7 +3158,6 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rep->ol_flags = -1;
#endif
assert(rep->buf_len == seg->buf_len);
- assert(rep->buf_len == rxq->mb_len);
/* Reconfigure sge to use rep instead of seg. */
assert(sge->lkey == rxq->mr->lkey);
sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
@@ -3235,8 +3288,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Fetch initial bytes of packet descriptor into a
* cacheline while allocating rep.
*/
- rte_prefetch0(seg);
- rte_prefetch0(&seg->cacheline1);
+ rte_mbuf_prefetch_part1(seg);
+ rte_mbuf_prefetch_part2(seg);
ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
&flags);
if (unlikely(ret < 0)) {
@@ -3274,7 +3327,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (ret == 0)
break;
len = ret;
- rep = __rte_mbuf_raw_alloc(rxq->mp);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(rep == NULL)) {
/*
* Unable to allocate a replacement mbuf,
@@ -3525,6 +3578,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
unsigned int i, k;
struct ibv_exp_qp_attr mod;
struct ibv_recv_wr *bad_wr;
+ unsigned int mb_len;
int err;
int parent = (rxq == &priv->rxq_parent);
@@ -3533,6 +3587,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
(void *)dev, (void *)rxq);
return EINVAL;
}
+ mb_len = rte_pktmbuf_data_room_size(rxq->mp);
DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
/* Number of descriptors and mbufs currently allocated. */
desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1));
@@ -3547,9 +3602,10 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
rxq->csum_l2tun = tmpl.csum_l2tun;
}
/* Enable scattered packets support for this queue if necessary. */
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
(dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+ (mb_len - RTE_PKTMBUF_HEADROOM))) {
tmpl.sp = 1;
desc_n /= MLX4_PMD_SGE_WR_N;
} else
@@ -3740,7 +3796,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
} attr;
enum ibv_exp_query_intf_status status;
struct ibv_recv_wr *bad_wr;
- struct rte_mbuf *buf;
+ unsigned int mb_len;
int ret = 0;
int parent = (rxq == &priv->rxq_parent);
@@ -3756,31 +3812,22 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
desc = 1;
goto skip_mr;
}
+ mb_len = rte_pktmbuf_data_room_size(mp);
if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) {
ERROR("%p: invalid number of RX descriptors (must be a"
" multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N);
return EINVAL;
}
- /* Get mbuf length. */
- buf = rte_pktmbuf_alloc(mp);
- if (buf == NULL) {
- ERROR("%p: unable to allocate mbuf", (void *)dev);
- return ENOMEM;
- }
- tmpl.mb_len = buf->buf_len;
- assert((rte_pktmbuf_headroom(buf) +
- rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
- assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
- rte_pktmbuf_free(buf);
/* Toggle RX checksum offload if hardware supports it. */
if (priv->hw_csum)
tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
if (priv->hw_csum_l2tun)
tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
/* Enable scattered packets support for this queue if necessary. */
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
(dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
+ (mb_len - RTE_PKTMBUF_HEADROOM))) {
tmpl.sp = 1;
desc /= MLX4_PMD_SGE_WR_N;
}
@@ -4723,7 +4770,7 @@ mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = &edata;
+ ifr.ifr_data = (void *)&edata;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
strerror(errno));
@@ -4817,6 +4864,7 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* Reconfigure each RX queue. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
+ unsigned int mb_len;
unsigned int max_frame_len;
int sp;
@@ -4826,7 +4874,9 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* toggle scattered support (sp) if necessary. */
max_frame_len = (priv->mtu + ETHER_HDR_LEN +
(ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
- sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
+ mb_len = rte_pktmbuf_data_room_size(rxq->mp);
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
/* Provide new values to rxq_setup(). */
dev->data->dev_conf.rxmode.jumbo_frame = sp;
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
@@ -4882,7 +4932,7 @@ mlx4_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
if (mlx4_is_secondary())
return -E_RTE_SECONDARY;
- ifr.ifr_data = &ethpause;
+ ifr.ifr_data = (void *)&ethpause;
priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = errno;
@@ -4932,7 +4982,7 @@ mlx4_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
if (mlx4_is_secondary())
return -E_RTE_SECONDARY;
- ifr.ifr_data = &ethpause;
+ ifr.ifr_data = (void *)&ethpause;
ethpause.autoneg = fc_conf->autoneg;
if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
(fc_conf->mode & RTE_FC_RX_PAUSE))
@@ -5358,7 +5408,7 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
rte_eal_alarm_cancel(mlx4_dev_link_status_handler, dev);
priv->pending_alarm = 0;
priv->intr_handle.fd = 0;
- priv->intr_handle.type = 0;
+ priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
}
/**
@@ -5757,22 +5807,16 @@ error:
static const struct rte_pci_id mlx4_pci_id_map[] = {
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3)
},
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
},
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3VF,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
},
{
.vendor_id = 0
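RTE_PCI_DEVICE() condenses the four fields the removed initializers spelled
out; its expansion is paraphrased below from rte_pci.h (the wildcard
subsystem IDs are supplied by the macro itself):

/*
 * RTE_PCI_DEVICE(vend, dev) expands to:
 *   .vendor_id = (vend), .device_id = (dev),
 *   .subsystem_vendor_id = PCI_ANY_ID,
 *   .subsystem_device_id = PCI_ANY_ID
 */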
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 92bfa070..f6d39388 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -47,18 +47,22 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_vlan.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
# Dependencies.
DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_kvargs
# Basic CFLAGS.
CFLAGS += -O3
CFLAGS += -std=gnu99 -Wall -Wextra
CFLAGS += -g
CFLAGS += -I.
+CFLAGS += -D_BSD_SOURCE
+CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-strict-prototypes
@@ -83,14 +87,6 @@ else
CFLAGS += -DNDEBUG -UPEDANTIC
endif
-ifdef CONFIG_RTE_LIBRTE_MLX5_SGE_WR_N
-CFLAGS += -DMLX5_PMD_SGE_WR_N=$(CONFIG_RTE_LIBRTE_MLX5_SGE_WR_N)
-endif
-
-ifdef CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE
-CFLAGS += -DMLX5_PMD_MAX_INLINE=$(CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE)
-endif
-
ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE
CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE)
endif
@@ -106,50 +102,31 @@ ifndef V
AUTOCONF_OUTPUT := >/dev/null
endif
-mlx5_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh
+mlx5_autoconf.h.new: FORCE
+
+mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
- HAVE_EXP_QUERY_DEVICE \
- infiniband/verbs.h \
- type 'struct ibv_exp_device_attr' $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_FLOW_SPEC_IPV6 \
- infiniband/verbs.h \
- type 'struct ibv_exp_flow_spec_ipv6' $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR \
- infiniband/verbs.h \
- enum IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS \
- infiniband/verbs.h \
- enum IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS \
+ HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
+ infiniband/verbs_exp.h \
+ enum IBV_EXP_CQ_COMPRESSED_CQE \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
- HAVE_EXP_CQ_RX_TCP_PACKET \
- infiniband/verbs.h \
- enum IBV_EXP_CQ_RX_TCP_PACKET \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_VERBS_FCS \
- infiniband/verbs.h \
- enum IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_VERBS_RX_END_PADDING \
- infiniband/verbs.h \
- enum IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_VERBS_VLAN_INSERTION \
- infiniband/verbs.h \
- enum IBV_EXP_RECEIVE_WQ_CVLAN_INSERTION \
+ HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE \
+ infiniband/mlx5_hw.h \
+ enum MLX5_ETH_VLAN_INLINE_HEADER_SIZE \
$(AUTOCONF_OUTPUT)
+# Create mlx5_autoconf.h or update it if it differs from the newly generated one.
+
+mlx5_autoconf.h: mlx5_autoconf.h.new
+ $Q [ -f '$@' ] && \
+ cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+ mv '$<' '$@'
+
$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h
clean_mlx5: FORCE
- $Q rm -f -- mlx5_autoconf.h
+ $Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new
clean: clean_mlx5
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 041cfc33..5aa4adc6 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -37,6 +37,7 @@
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
+#include <errno.h>
#include <net/if.h>
/* Verbs header. */
@@ -57,6 +58,7 @@
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_common.h>
+#include <rte_kvargs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -67,6 +69,21 @@
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+/* Device parameter to enable RX completion queue compression. */
+#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
+
+/* Device parameter to configure inline send. */
+#define MLX5_TXQ_INLINE "txq_inline"
+
+/*
+ * Device parameter to configure the TX queue count threshold at or above
+ * which inline send is enabled.
+ */
+#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
+
+/* Device parameter to enable multi-packet send WQEs. */
+#define MLX5_TXQ_MPW_EN "txq_mpw_en"
+
/**
* Retrieve integer value from environment variable.
*
@@ -98,7 +115,6 @@ static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
struct priv *priv = mlx5_get_priv(dev);
- void *tmp;
unsigned int i;
priv_lock(priv);
@@ -122,12 +138,15 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->rxqs_n); ++i) {
- tmp = (*priv->rxqs)[i];
- if (tmp == NULL)
+ struct rxq *rxq = (*priv->rxqs)[i];
+ struct rxq_ctrl *rxq_ctrl;
+
+ if (rxq == NULL)
continue;
+ rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
(*priv->rxqs)[i] = NULL;
- rxq_cleanup(tmp);
- rte_free(tmp);
+ rxq_cleanup(rxq_ctrl);
+ rte_free(rxq_ctrl);
}
priv->rxqs_n = 0;
priv->rxqs = NULL;
@@ -136,12 +155,15 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_tx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->txqs_n); ++i) {
- tmp = (*priv->txqs)[i];
- if (tmp == NULL)
+ struct txq *txq = (*priv->txqs)[i];
+ struct txq_ctrl *txq_ctrl;
+
+ if (txq == NULL)
continue;
+ txq_ctrl = container_of(txq, struct txq_ctrl, txq);
(*priv->txqs)[i] = NULL;
- txq_cleanup(tmp);
- rte_free(tmp);
+ txq_cleanup(txq_ctrl);
+ rte_free(txq_ctrl);
}
priv->txqs_n = 0;
priv->txqs = NULL;
@@ -190,17 +212,13 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.mac_addr_add = mlx5_mac_addr_add,
.mac_addr_set = mlx5_mac_addr_set,
.mtu_set = mlx5_dev_set_mtu,
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
.reta_update = mlx5_dev_rss_reta_update,
.reta_query = mlx5_dev_rss_reta_query,
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
-#ifdef MLX5_FDIR_SUPPORT
.filter_ctrl = mlx5_dev_filter_ctrl,
-#endif /* MLX5_FDIR_SUPPORT */
};
static struct {
@@ -236,6 +254,90 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
return ret;
}
+/**
+ * Verify and store value for device argument.
+ *
+ * @param[in] key
+ * Key argument to verify.
+ * @param[in] val
+ * Value associated with key.
+ * @param opaque
+ * User data.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx5_args_check(const char *key, const char *val, void *opaque)
+{
+ struct priv *priv = opaque;
+ unsigned long tmp;
+
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ WARN("%s: \"%s\" is not a valid integer", key, val);
+ return -errno;
+ }
+ if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
+ priv->cqe_comp = !!tmp;
+ } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
+ priv->txq_inline = tmp;
+ } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
+ priv->txqs_inline = tmp;
+ } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
+ priv->mps = !!tmp;
+ } else {
+ WARN("%s: unknown parameter", key);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Parse device parameters.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param devargs
+ * Device arguments structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+mlx5_args(struct priv *priv, struct rte_devargs *devargs)
+{
+ const char **params = (const char *[]){
+ MLX5_RXQ_CQE_COMP_EN,
+ MLX5_TXQ_INLINE,
+ MLX5_TXQS_MIN_INLINE,
+ MLX5_TXQ_MPW_EN,
+ NULL,
+ };
+ struct rte_kvargs *kvlist;
+ int ret = 0;
+ int i;
+
+ if (devargs == NULL)
+ return 0;
+ /* The UGLY compound-literal cast in the params initializer is done to pass checkpatch. */
+ kvlist = rte_kvargs_parse(devargs->args, params);
+ if (kvlist == NULL)
+ return 0;
+ /* Process parameters. */
+ for (i = 0; (params[i] != NULL); ++i) {
+ if (rte_kvargs_count(kvlist, params[i])) {
+ ret = rte_kvargs_process(kvlist, params[i],
+ mlx5_args_check, priv);
+ if (ret != 0) {
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+ }
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
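A minimal standalone sketch of the kvargs round-trip mlx5_args() performs,
assuming the sample devargs string "txq_inline=128,txq_mpw_en=1" and an
illustrative print handler (neither is part of the patch):

#include <stdio.h>
#include <rte_kvargs.h>

static int
print_arg(const char *key, const char *val, void *opaque)
{
	(void)opaque;
	printf("%s = %s\n", key, val);
	return 0;
}

int
main(void)
{
	const char *keys[] = { "txq_inline", "txq_mpw_en", NULL };
	struct rte_kvargs *kvlist =
		rte_kvargs_parse("txq_inline=128,txq_mpw_en=1", keys);

	if (kvlist == NULL)
		return 1;
	if (rte_kvargs_count(kvlist, "txq_inline"))
		rte_kvargs_process(kvlist, "txq_inline", print_arg, NULL);
	rte_kvargs_free(kvlist);
	return 0;
}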
+
static struct eth_driver mlx5_driver;
/**
@@ -260,7 +362,7 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
int err = 0;
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
- unsigned int vf;
+ unsigned int sriov;
unsigned int mps;
int idx;
int i;
@@ -303,17 +405,17 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
(pci_dev->addr.devid != pci_addr.devid) ||
(pci_dev->addr.function != pci_addr.function))
continue;
- vf = ((pci_dev->id.device_id ==
+ sriov = ((pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
(pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
/* Multi-packet send is only supported by ConnectX-4 Lx PF. */
mps = (pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
- INFO("PCI information matches, using device \"%s\" (VF: %s,"
- " MPS: %s)",
+ INFO("PCI information matches, using device \"%s\""
+ " (SR-IOV: %s, MPS: %s)",
list[i]->name,
- vf ? "true" : "false",
+ sriov ? "true" : "false",
mps ? "true" : "false");
attr_ctx = ibv_open_device(list[i]);
err = errno;
@@ -347,23 +449,16 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_pd *pd = NULL;
struct priv *priv = NULL;
struct rte_eth_dev *eth_dev;
-#ifdef HAVE_EXP_QUERY_DEVICE
struct ibv_exp_device_attr exp_device_attr;
-#endif /* HAVE_EXP_QUERY_DEVICE */
struct ether_addr mac;
+ uint16_t num_vfs = 0;
-#ifdef HAVE_EXP_QUERY_DEVICE
exp_device_attr.comp_mask =
IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
IBV_EXP_DEVICE_ATTR_RX_HASH |
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-#ifdef HAVE_VERBS_RX_END_PADDING
IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
-#endif /* HAVE_VERBS_RX_END_PADDING */
0;
-#endif /* HAVE_EXP_QUERY_DEVICE */
DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -414,7 +509,14 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->port = port;
priv->pd = pd;
priv->mtu = ETHER_MTU;
-#ifdef HAVE_EXP_QUERY_DEVICE
+ priv->mps = mps; /* Enable MPW by default if supported. */
+ priv->cqe_comp = 1; /* Enable compression by default. */
+ err = mlx5_args(priv, pci_dev->devargs);
+ if (err) {
+ ERROR("failed to process device arguments: %s",
+ strerror(err));
+ goto port_error;
+ }
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
goto port_error;
@@ -440,32 +542,28 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
DEBUG("maximum RX indirection table size is %u",
priv->ind_table_max_size);
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
DEBUG("VLAN stripping is %ssupported",
(priv->hw_vlan_strip ? "" : "not "));
-#ifdef HAVE_VERBS_FCS
priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags &
IBV_EXP_DEVICE_SCATTER_FCS);
-#endif /* HAVE_VERBS_FCS */
DEBUG("FCS stripping configuration is %ssupported",
(priv->hw_fcs_strip ? "" : "not "));
-#ifdef HAVE_VERBS_RX_END_PADDING
priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align;
-#endif /* HAVE_VERBS_RX_END_PADDING */
DEBUG("hardware RX end alignment padding is %ssupported",
(priv->hw_padding ? "" : "not "));
-#else /* HAVE_EXP_QUERY_DEVICE */
- priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
-#endif /* HAVE_EXP_QUERY_DEVICE */
-
- priv->vf = vf;
- priv->mps = mps;
+ priv_get_num_vfs(priv, &num_vfs);
+ priv->sriov = (num_vfs || sriov);
+ if (priv->mps && !mps) {
+ ERROR("multi-packet send not supported on this device"
+ " (" MLX5_TXQ_MPW_EN ")");
+ err = ENOTSUP;
+ goto port_error;
+ }
/* Allocate and register default RSS hash keys. */
priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
sizeof((*priv->rss_conf)[0]), 0);
@@ -608,28 +706,20 @@ error:
static const struct rte_pci_id mlx5_pci_id_map[] = {
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4)
},
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4VF,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
},
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LX,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
},
{
- .vendor_id = PCI_VENDOR_ID_MELLANOX,
- .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
},
{
.vendor_id = 0
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 24876625..3a866098 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -39,7 +39,6 @@
#include <limits.h>
#include <net/if.h>
#include <netinet/in.h>
-#include <linux/if.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -69,6 +68,11 @@
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+#if !defined(HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE) || \
+ !defined(HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE)
+#error Mellanox OFED >= 3.3 is required, please refer to the documentation.
+#endif
+
enum {
PCI_VENDOR_ID_MELLANOX = 0x15b3,
};
@@ -105,9 +109,12 @@ struct priv {
unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
- unsigned int vf:1; /* This is a VF device. */
+ unsigned int sriov:1; /* This is a VF or PF with VF devices. */
unsigned int mps:1; /* Whether multi-packet send is supported. */
+ unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
unsigned int pending_alarm:1; /* An alarm is pending. */
+ unsigned int txq_inline; /* Maximum packet size for inlining. */
+ unsigned int txqs_inline; /* Queue number threshold for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
@@ -173,6 +180,7 @@ struct priv *mlx5_get_priv(struct rte_eth_dev *dev);
int mlx5_is_secondary(void);
int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]);
int priv_ifreq(const struct priv *, int req, struct ifreq *);
+int priv_get_num_vfs(struct priv *, uint16_t *);
int priv_get_mtu(struct priv *, uint16_t *);
int priv_set_flags(struct priv *, unsigned int, unsigned int);
int mlx5_dev_configure(struct rte_eth_dev *);
@@ -191,6 +199,8 @@ void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
struct priv *mlx5_secondary_data_setup(struct priv *priv);
+void priv_select_tx_function(struct priv *);
+void priv_select_rx_function(struct priv *);
/* mlx5_mac.c */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 09207d9c..cc2a6f3e 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -48,22 +48,15 @@
/* Maximum number of special flows. */
#define MLX5_MAX_SPECIAL_FLOWS 4
-/* Request send completion once in every 64 sends, might be less. */
-#define MLX5_PMD_TX_PER_COMP_REQ 64
+/*
+ * Request a TX completion once the number of descriptors posted since the
+ * previous request reaches this threshold. Must be a power of two for
+ * performance reasons.
+ */
+#define MLX5_TX_COMP_THRESH 32
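The power-of-two constraint lets the TX path detect the threshold with a
single AND instead of a division; a minimal sketch of that check (the
helper name and counter are illustrative, not the PMD's actual variables):

#include <stdint.h>

#define MLX5_TX_COMP_THRESH 32

/* True once every MLX5_TX_COMP_THRESH descriptors. */
static inline int
tx_comp_request(uint16_t posted)
{
	return (posted & (MLX5_TX_COMP_THRESH - 1)) == 0;
}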
/* RSS Indirection table size. */
#define RSS_INDIRECTION_TABLE_SIZE 256
-/* Maximum number of Scatter/Gather Elements per Work Request. */
-#ifndef MLX5_PMD_SGE_WR_N
-#define MLX5_PMD_SGE_WR_N 4
-#endif
-
-/* Maximum size for inline data. */
-#ifndef MLX5_PMD_MAX_INLINE
-#define MLX5_PMD_MAX_INLINE 0
-#endif
-
/*
* Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
* from which buffers are to be transmitted will have to be mapped by this
@@ -86,13 +79,4 @@
/* Alarm timeout. */
#define MLX5_ALARM_TIMEOUT_US 100000
-/*
- * Extended flow priorities necessary to support flow director are available
- * since MLNX_OFED 3.2. Considering this version adds support for VLAN
- * offloads as well, their availability means flow director can be used.
- */
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
-#define MLX5_FDIR_SUPPORT 1
-#endif
-
#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 36b369e7..0e7ed019 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -44,7 +44,6 @@
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
-#include <linux/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>
@@ -363,6 +362,38 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
}
/**
+ * Return the number of active VFs for the current device.
+ *
+ * @param[in] priv
+ * Pointer to private structure.
+ * @param[out] num_vfs
+ * Number of active VFs.
+ *
+ * @return
+ * 0 on success, -1 on failure and errno is set.
+ */
+int
+priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs)
+{
+ /* The sysfs entry name depends on the kernel driver in use. */
+ const char **name = (const char *[]){
+ "device/sriov_numvfs",
+ "device/mlx5_num_vfs",
+ NULL,
+ };
+ int ret;
+
+ do {
+ unsigned long ulong_num_vfs;
+
+ ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs);
+ if (!ret)
+ *num_vfs = ulong_num_vfs;
+ } while (*(++name) && ret);
+ return ret;
+}
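priv_get_sysfs_ulong() is defined earlier in this file and not shown in
this hunk; a rough standalone sketch of the kind of read it performs, with
a hypothetical helper name:

#include <stdio.h>
#include <stdlib.h>

static int
read_sysfs_ulong(const char *path, unsigned long *val)
{
	char buf[32];
	FILE *f = fopen(path, "r");

	if (f == NULL)
		return -1;
	if (fgets(buf, sizeof(buf), f) == NULL) {
		fclose(f);
		return -1;
	}
	fclose(f);
	*val = strtoul(buf, NULL, 0);
	return 0;
}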
+
+/**
* Get device MTU.
*
* @param priv
@@ -398,7 +429,15 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu)
static int
priv_set_mtu(struct priv *priv, uint16_t mtu)
{
- return priv_set_sysfs_ulong(priv, "mtu", mtu);
+ uint16_t new_mtu;
+
+ if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
+ priv_get_mtu(priv, &new_mtu))
+ return -1;
+ if (new_mtu == mtu)
+ return 0;
+ errno = EINVAL;
+ return -1;
}
/**
@@ -545,7 +584,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM) :
0);
- info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ if (!priv->mps)
+ info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
if (priv->hw_csum)
info->tx_offload_capa |=
(DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -584,8 +624,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
};
- if (dev->rx_pkt_burst == mlx5_rx_burst ||
- dev->rx_pkt_burst == mlx5_rx_burst_sp)
+ if (dev->rx_pkt_burst == mlx5_rx_burst)
return ptypes;
return NULL;
}
@@ -617,7 +656,7 @@ mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = &edata;
+ ifr.ifr_data = (void *)&edata;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
strerror(errno));
@@ -686,6 +725,9 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
unsigned int i;
uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
mlx5_rx_burst;
+ unsigned int max_frame_len;
+ int rehash;
+ int restart = priv->started;
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
@@ -699,7 +741,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
goto out;
} else
DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
- priv->mtu = mtu;
/* Temporarily replace RX handler with a fake one, assuming it has not
* been copied elsewhere. */
dev->rx_pkt_burst = removed_rx_burst;
@@ -707,33 +748,94 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* removed_rx_burst() instead. */
rte_wmb();
usleep(1000);
+ /* MTU does not include header and CRC. */
+ max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
+ /* Check if at least one queue is going to need a SGE update. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ unsigned int mb_len;
+ unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
+ unsigned int sges_n;
+
+ if (rxq == NULL)
+ continue;
+ mb_len = rte_pktmbuf_data_room_size(rxq->mp);
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ /*
+ * Determine the number of SGEs needed for a full packet
+ * and round it up to the next power of two (see the worked
+ * example after this loop).
+ */
+ sges_n = log2above((size / mb_len) + !!(size % mb_len));
+ if (sges_n != rxq->sges_n)
+ break;
+ }
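A worked instance of the computation above under assumed values (2048-byte
data room, 9000-byte MTU); log2above() is re-implemented locally so the
sketch stands alone:

#include <stdio.h>

/* Local stand-in for the driver's log2above(): log2 rounded up. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	const unsigned int headroom = 128;       /* RTE_PKTMBUF_HEADROOM */
	const unsigned int max_frame_len = 9018; /* 9000 + 14 + 4 */
	const unsigned int mb_len = 2048;        /* assumed data room */
	unsigned int size = headroom + max_frame_len;
	unsigned int sges_n = log2above((size / mb_len) + !!(size % mb_len));

	/* 9146 bytes over 2048-byte segments -> 5, rounded up to 8. */
	printf("sges_n=%u (%u segments)\n", sges_n, 1u << sges_n);
	return 0;
}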
+ /*
+ * If all queues have the right number of SGEs, a simple rehash
+ * of their buffers is enough, otherwise SGE information can only
+ * be updated in a queue by recreating it. All resources that depend
+ * on queues (flows, indirection tables) must be recreated as well in
+ * that case.
+ */
+ rehash = (i == priv->rxqs_n);
+ if (!rehash) {
+ /* Clean up everything as with mlx5_dev_stop(). */
+ priv_special_flow_disable_all(priv);
+ priv_mac_addrs_disable(priv);
+ priv_destroy_hash_rxqs(priv);
+ priv_fdir_disable(priv);
+ priv_dev_interrupt_handler_uninstall(priv, dev);
+ }
+recover:
/* Reconfigure each RX queue. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
- unsigned int max_frame_len;
+ struct rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct rxq_ctrl, rxq);
int sp;
+ unsigned int mb_len;
+ unsigned int tmp;
if (rxq == NULL)
continue;
- /* Calculate new maximum frame length according to MTU and
- * toggle scattered support (sp) if necessary. */
- max_frame_len = (priv->mtu + ETHER_HDR_LEN +
- (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
- sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM));
+ mb_len = rte_pktmbuf_data_room_size(rxq->mp);
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ /* Toggle scattered support (sp) if necessary. */
+ sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
/* Provide new values to rxq_setup(). */
dev->data->dev_conf.rxmode.jumbo_frame = sp;
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
- ret = rxq_rehash(dev, rxq);
- if (ret) {
- /* Force SP RX if that queue requires it and abort. */
- if (rxq->sp)
- rx_func = mlx5_rx_burst_sp;
- break;
+ if (rehash)
+ ret = rxq_rehash(dev, rxq_ctrl);
+ else
+ ret = rxq_ctrl_setup(dev, rxq_ctrl, rxq->elts_n,
+ rxq_ctrl->socket, NULL, rxq->mp);
+ if (!ret)
+ continue;
+ /* Attempt to roll back in case of error. */
+ tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
+ if (max_frame_len != tmp) {
+ max_frame_len = tmp;
+ goto recover;
}
- /* Scattered burst function takes priority. */
- if (rxq->sp)
- rx_func = mlx5_rx_burst_sp;
+ /* Double fault, disable RX. */
+ break;
+ }
+ /*
+ * Use a safe RX burst function in case of error, otherwise mimic
+ * mlx5_dev_start().
+ */
+ if (ret) {
+ ERROR("unable to reconfigure RX queues, RX disabled");
+ rx_func = removed_rx_burst;
+ } else if (restart &&
+ !rehash &&
+ !priv_create_hash_rxqs(priv) &&
+ !priv_rehash_flows(priv)) {
+ if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
+ priv_fdir_enable(priv);
+ priv_dev_interrupt_handler_install(priv, dev);
}
+ priv->mtu = mtu;
/* Burst functions can now be called again. */
rte_wmb();
dev->rx_pkt_burst = rx_func;
@@ -767,7 +869,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
- ifr.ifr_data = &ethpause;
+ ifr.ifr_data = (void *)&ethpause;
priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = errno;
@@ -818,7 +920,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
- ifr.ifr_data = &ethpause;
+ ifr.ifr_data = (void *)&ethpause;
ethpause.autoneg = fc_conf->autoneg;
if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
(fc_conf->mode & RTE_FC_RX_PAUSE))
@@ -1012,7 +1114,7 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
priv->pending_alarm = 0;
priv->intr_handle.fd = 0;
- priv->intr_handle.type = 0;
+ priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
}
/**
@@ -1061,23 +1163,13 @@ priv_set_link(struct priv *priv, int up)
{
struct rte_eth_dev *dev = priv->dev;
int err;
- unsigned int i;
if (up) {
err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
if (err)
return err;
- for (i = 0; i < priv->rxqs_n; i++)
- if ((*priv->rxqs)[i]->sp)
- break;
- /* Check if an sp queue exists.
- * Note: Some old frames might be received.
- */
- if (i == priv->rxqs_n)
- dev->rx_pkt_burst = mlx5_rx_burst;
- else
- dev->rx_pkt_burst = mlx5_rx_burst_sp;
- dev->tx_pkt_burst = mlx5_tx_burst;
+ priv_select_tx_function(priv);
+ priv_select_rx_function(priv);
} else {
err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
if (err)
@@ -1209,34 +1301,40 @@ mlx5_secondary_data_setup(struct priv *priv)
/* TX queues. */
for (i = 0; i != nb_tx_queues; ++i) {
struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
- struct txq *txq;
+ struct txq_ctrl *primary_txq_ctrl;
+ struct txq_ctrl *txq_ctrl;
if (primary_txq == NULL)
continue;
- txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0,
- primary_txq->socket);
- if (txq != NULL) {
- if (txq_setup(priv->dev,
- txq,
- primary_txq->elts_n * MLX5_PMD_SGE_WR_N,
- primary_txq->socket,
- NULL) == 0) {
- txq->stats.idx = primary_txq->stats.idx;
- tx_queues[i] = txq;
+ primary_txq_ctrl = container_of(primary_txq,
+ struct txq_ctrl, txq);
+ txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl), 0,
+ primary_txq_ctrl->socket);
+ if (txq_ctrl != NULL) {
+ if (txq_ctrl_setup(priv->dev,
+ primary_txq_ctrl,
+ primary_txq->elts_n,
+ primary_txq_ctrl->socket,
+ NULL) == 0) {
+ txq_ctrl->txq.stats.idx =
+ primary_txq->stats.idx;
+ tx_queues[i] = &txq_ctrl->txq;
continue;
}
- rte_free(txq);
+ rte_free(txq_ctrl);
}
while (i) {
- txq = tx_queues[--i];
- txq_cleanup(txq);
- rte_free(txq);
+ txq_ctrl = tx_queues[--i];
+ txq_cleanup(txq_ctrl);
+ rte_free(txq_ctrl);
}
goto error;
}
/* RX queues. */
for (i = 0; i != nb_rx_queues; ++i) {
- struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i];
+ struct rxq_ctrl *primary_rxq =
+ container_of((*sd->primary_priv->rxqs)[i],
+ struct rxq_ctrl, rxq);
if (primary_rxq == NULL)
continue;
@@ -1263,13 +1361,11 @@ mlx5_secondary_data_setup(struct priv *priv)
rte_mb();
priv->dev->data = &sd->data;
rte_mb();
- priv->dev->tx_pkt_burst = mlx5_tx_burst;
- priv->dev->rx_pkt_burst = removed_rx_burst;
+ priv_select_tx_function(priv);
+ priv_select_rx_function(priv);
priv_unlock(priv);
end:
/* More sanity checks. */
- assert(priv->dev->tx_pkt_burst == mlx5_tx_burst);
- assert(priv->dev->rx_pkt_burst == removed_rx_burst);
assert(priv->dev->data == &sd->data);
rte_spinlock_unlock(&sd->lock);
return priv;
@@ -1280,3 +1376,42 @@ error:
rte_spinlock_unlock(&sd->lock);
return NULL;
}
+
+/**
+ * Configure the TX function to use.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_select_tx_function(struct priv *priv)
+{
+ priv->dev->tx_pkt_burst = mlx5_tx_burst;
+ /* Display warning for unsupported configurations. */
+ if (priv->sriov && priv->mps)
+ WARN("multi-packet send WQE cannot be used on a SR-IOV setup");
+ /* Select appropriate TX function. */
+ if ((priv->sriov == 0) && priv->mps && priv->txq_inline) {
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
+ DEBUG("selected MPW inline TX function");
+ } else if ((priv->sriov == 0) && priv->mps) {
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
+ DEBUG("selected MPW TX function");
+ } else if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+ priv->dev->tx_pkt_burst = mlx5_tx_burst_inline;
+ DEBUG("selected inline TX function (%u >= %u queues)",
+ priv->txqs_n, priv->txqs_inline);
+ }
+}
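The conditions above reduce to the following selection table (a summary of
the code, not additional patch content):

/*
 * sriov  mps  txq_inline  txqs_n >= txqs_inline  ->  tx_pkt_burst
 *   0     1      > 0             any             ->  mlx5_tx_burst_mpw_inline
 *   0     1        0             any             ->  mlx5_tx_burst_mpw
 *  else if txq_inline > 0 and threshold met      ->  mlx5_tx_burst_inline
 *  otherwise                                     ->  mlx5_tx_burst (default)
 */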
+
+/**
+ * Configure the RX function to use.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_select_rx_function(struct priv *priv)
+{
+ priv->dev->rx_pkt_burst = mlx5_rx_burst;
+}
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 63e43ad9..73eb00ec 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -122,7 +122,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
desc->type = HASH_RXQ_IPV4;
break;
-#ifdef HAVE_FLOW_SPEC_IPV6
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
desc->type = HASH_RXQ_UDPV6;
break;
@@ -132,7 +131,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
desc->type = HASH_RXQ_IPV6;
break;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
@@ -147,7 +145,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
break;
-#ifdef HAVE_FLOW_SPEC_IPV6
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
@@ -161,7 +158,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.ipv6_flow.dst_ip,
sizeof(desc->dst_ip));
break;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
@@ -211,7 +207,6 @@ priv_fdir_overlap(const struct priv *priv,
(desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
return 0;
break;
-#ifdef HAVE_FLOW_SPEC_IPV6
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
case HASH_RXQ_TCPV6:
@@ -222,7 +217,6 @@ priv_fdir_overlap(const struct priv *priv,
(desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
return 0;
break;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
break;
}
@@ -258,9 +252,7 @@ priv_fdir_flow_add(struct priv *priv,
uintptr_t spec_offset = (uintptr_t)&data->spec;
struct ibv_exp_flow_spec_eth *spec_eth;
struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
-#ifdef HAVE_FLOW_SPEC_IPV6
struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
struct mlx5_fdir_filter *iter_fdir_filter;
unsigned int i;
@@ -334,7 +326,6 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_ipv4->size;
break;
-#ifdef HAVE_FLOW_SPEC_IPV6
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
case HASH_RXQ_TCPV6:
@@ -368,7 +359,6 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_ipv6->size;
break;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
default:
ERROR("invalid flow attribute type");
return EINVAL;
@@ -424,7 +414,9 @@ create_flow:
static struct fdir_queue *
priv_get_fdir_queue(struct priv *priv, uint16_t idx)
{
- struct fdir_queue *fdir_queue = &(*priv->rxqs)[idx]->fdir_queue;
+ struct rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
+ struct fdir_queue *fdir_queue = &rxq_ctrl->fdir_queue;
struct ibv_exp_rwq_ind_table *ind_table = NULL;
struct ibv_qp *qp = NULL;
struct ibv_exp_rwq_ind_table_init_attr ind_init_attr;
@@ -439,7 +431,7 @@ priv_get_fdir_queue(struct priv *priv, uint16_t idx)
ind_init_attr = (struct ibv_exp_rwq_ind_table_init_attr){
.pd = priv->pd,
.log_ind_tbl_size = 0,
- .ind_tbl = &((*priv->rxqs)[idx]->wq),
+ .ind_tbl = &rxq_ctrl->wq,
.comp_mask = 0,
};
@@ -629,8 +621,10 @@ priv_fdir_disable(struct priv *priv)
/* Run on every RX queue to destroy related flow director QP and
* indirection table. */
for (i = 0; (i != priv->rxqs_n); i++) {
- fdir_queue = &(*priv->rxqs)[i]->fdir_queue;
+ struct rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
+ fdir_queue = &rxq_ctrl->fdir_queue;
if (fdir_queue->qp != NULL) {
claim_zero(ibv_destroy_qp(fdir_queue->qp));
fdir_queue->qp = NULL;
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index c9cea485..f6b27bb8 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -38,7 +38,6 @@
#include <inttypes.h>
#include <errno.h>
#include <netinet/in.h>
-#include <linux/if.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
new file mode 100644
index 00000000..67dfefa8
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -0,0 +1,283 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-pedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-pedantic"
+#endif
+
+/* DPDK headers don't like -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-pedantic"
+#endif
+#include <rte_mempool.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-pedantic"
+#endif
+
+#include "mlx5.h"
+#include "mlx5_rxtx.h"
+
+struct mlx5_check_mempool_data {
+ int ret;
+ char *start;
+ char *end;
+};
+
+/* Called by mlx5_check_mempool() when iterating the memory chunks. */
+static void
+mlx5_check_mempool_cb(struct rte_mempool *mp,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned int mem_idx)
+{
+ struct mlx5_check_mempool_data *data = opaque;
+
+ (void)mp;
+ (void)mem_idx;
+
+ /* It already failed, skip the next chunks. */
+ if (data->ret != 0)
+ return;
+ /* It is the first chunk. */
+ if (data->start == NULL && data->end == NULL) {
+ data->start = memhdr->addr;
+ data->end = data->start + memhdr->len;
+ return;
+ }
+ if (data->end == memhdr->addr) {
+ data->end += memhdr->len;
+ return;
+ }
+ if (data->start == (char *)memhdr->addr + memhdr->len) {
+ data->start -= memhdr->len;
+ return;
+ }
+ /* Error, mempool is not virtually contiguous. */
+ data->ret = -1;
+}
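Traced on an assumed three-chunk layout, the callback above merges
adjacent chunks and flags the first gap (the third test likewise extends
start downward when a chunk precedes the current area):

/*
 * chunk 0: addr=0x1000 len=0x1000 -> start=0x1000, end=0x2000
 * chunk 1: addr=0x2000 len=0x1000 -> end grows to 0x3000
 * chunk 2: addr=0x4000 len=0x1000 -> not adjacent, ret = -1
 */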
+
+/**
+ * Check if a mempool can be used: it must be virtually contiguous.
+ *
+ * @param[in] mp
+ * Pointer to memory pool.
+ * @param[out] start
+ * Pointer to the start address of the mempool virtual memory area.
+ * @param[out] end
+ * Pointer to the end address of the mempool virtual memory area.
+ *
+ * @return
+ * 0 on success (mempool is virtually contiguous), -1 on error.
+ */
+static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
+ uintptr_t *end)
+{
+ struct mlx5_check_mempool_data data;
+
+ memset(&data, 0, sizeof(data));
+ rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
+ *start = (uintptr_t)data.start;
+ *end = (uintptr_t)data.end;
+
+ return data.ret;
+}
+
+/**
+ * Register mempool as a memory region.
+ *
+ * @param pd
+ * Pointer to protection domain.
+ * @param mp
+ * Pointer to memory pool.
+ *
+ * @return
+ * Memory region pointer, NULL in case of error.
+ */
+struct ibv_mr *
+mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ uintptr_t start;
+ uintptr_t end;
+ unsigned int i;
+
+ if (mlx5_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return NULL;
+ }
+
+ DEBUG("mempool %p area start=%p end=%p size=%zu",
+ (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ /* Round start and end to page boundary if found in memory segments. */
+ for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
+ uintptr_t addr = (uintptr_t)ms[i].addr;
+ size_t len = ms[i].len;
+ unsigned int align = ms[i].hugepage_sz;
+
+ if ((start > addr) && (start < addr + len))
+ start = RTE_ALIGN_FLOOR(start, align);
+ if ((end > addr) && (end < addr + len))
+ end = RTE_ALIGN_CEIL(end, align);
+ }
+ DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
+ (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ return ibv_reg_mr(pd,
+ (void *)start,
+ end - start,
+ IBV_ACCESS_LOCAL_WRITE);
+}
+
+/**
+ * Register a Memory Region (MR) <-> Memory Pool (MP) association in
+ * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
+ *
+ * This function should only be called by txq_mp2mr().
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param[in] mp
+ * Memory Pool for which a Memory Region lkey must be returned.
+ * @param idx
+ * Index of the next available entry.
+ *
+ * @return
+ * mr->lkey on success, (uint32_t)-1 on failure.
+ */
+uint32_t
+txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
+{
+ struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ struct ibv_mr *mr;
+
+ /* Add a new entry, register MR first. */
+ DEBUG("%p: discovered new memory pool \"%s\" (%p)",
+ (void *)txq_ctrl, mp->name, (void *)mp);
+ mr = mlx5_mp2mr(txq_ctrl->priv->pd, mp);
+ if (unlikely(mr == NULL)) {
+ DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
+ (void *)txq_ctrl);
+ return (uint32_t)-1;
+ }
+ if (unlikely(idx == RTE_DIM(txq_ctrl->txq.mp2mr))) {
+ /* Table is full, remove oldest entry. */
+ DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
+ (void *)txq_ctrl);
+ --idx;
+ claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[0].mr));
+ memmove(&txq_ctrl->txq.mp2mr[0], &txq_ctrl->txq.mp2mr[1],
+ (sizeof(txq_ctrl->txq.mp2mr) -
+ sizeof(txq_ctrl->txq.mp2mr[0])));
+ }
+ /* Store the new entry. */
+ txq_ctrl->txq.mp2mr[idx].mp = mp;
+ txq_ctrl->txq.mp2mr[idx].mr = mr;
+ txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
+ DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
+ (void *)txq_ctrl, mp->name, (void *)mp,
+ txq_ctrl->txq.mp2mr[idx].lkey);
+ return txq_ctrl->txq.mp2mr[idx].lkey;
+}
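Eviction in the function above, traced on an assumed two-entry table that
already holds pools A and B when pool C arrives with a full index:

/*
 * mp2mr = [A, B], idx == RTE_DIM(mp2mr):
 *   deregister A's MR, --idx;
 *   memmove shifts entries down: mp2mr = [B, B];
 *   store C at idx:              mp2mr = [B, C].
 */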
+
+struct txq_mp2mr_mbuf_check_data {
+ int ret;
+};
+
+/**
+ * Callback function for rte_mempool_obj_iter() to check whether a given
+ * mempool object looks like an mbuf.
+ *
+ * @param[in] mp
+ * The mempool pointer.
+ * @param[in] arg
+ * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
+ * return value.
+ * @param[in] obj
+ * Object address.
+ * @param index
+ * Object index, unused.
+ */
+static void
+txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
+ uint32_t index __rte_unused)
+{
+ struct txq_mp2mr_mbuf_check_data *data = arg;
+ struct rte_mbuf *buf = obj;
+
+ /*
+ * Check whether mbuf structure fits element size and whether mempool
+ * pointer is valid.
+ */
+ if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
+ data->ret = -1;
+}
+
+/**
+ * Iterator function for rte_mempool_walk() to register existing mempools and
+ * fill the MP to MR cache of a TX queue.
+ *
+ * @param[in] mp
+ * Memory Pool to register.
+ * @param arg
+ * Pointer to TX queue structure.
+ */
+void
+txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
+{
+ struct txq_ctrl *txq_ctrl = arg;
+ struct txq_mp2mr_mbuf_check_data data = {
+ .ret = 0,
+ };
+ unsigned int i;
+
+ /* Register mempool only if its elements look like mbufs. */
+ if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
+ data.ret == -1)
+ return;
+ for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
+ if (unlikely(txq_ctrl->txq.mp2mr[i].mp == NULL)) {
+ /* Unknown MP, add a new MR for it. */
+ break;
+ }
+ if (txq_ctrl->txq.mp2mr[i].mp == mp)
+ return;
+ }
+ txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
+}
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
new file mode 100644
index 00000000..5db219b3
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX5_PRM_H_
+#define RTE_PMD_MLX5_PRM_H_
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-pedantic"
+#endif
+#include <infiniband/mlx5_hw.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-pedantic"
+#endif
+
+/* Get CQE owner bit. */
+#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)
+
+/* Get CQE format. */
+#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)
+
+/* Get CQE opcode. */
+#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)
+
+/* Get CQE solicited event. */
+#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)
+
+/* Invalidate a CQE. */
+#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
+
+/* CQE value to inform that VLAN is stripped. */
+#define MLX5_CQE_VLAN_STRIPPED 0x1
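One assumed op_own value decoded with the macros above (the owner mask
value comes from infiniband/mlx5_hw.h and is not reproduced here):

/*
 * op_own = 0x25:
 *   MLX5_CQE_OPCODE(0x25) = (0x25 & 0xf0) >> 4 = 0x2
 *   MLX5_CQE_SE(0x25)     = (0x25 >> 1) & 1    = 0
 *   MLX5_CQE_OWNER(0x25)  = 0x25 & MLX5_CQE_OWNER_MASK
 */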
+
+/* Maximum number of packets a multi-packet WQE can handle. */
+#define MLX5_MPW_DSEG_MAX 5
+
+/* Room for inline data in regular work queue element. */
+#define MLX5_WQE64_INL_DATA 12
+
+/* Room for inline data in multi-packet WQE. */
+#define MLX5_MWQE64_INL_DATA 28
+
+/* Subset of struct mlx5_wqe_eth_seg. */
+struct mlx5_wqe_eth_seg_small {
+ uint32_t rsvd0;
+ uint8_t cs_flags;
+ uint8_t rsvd1;
+ uint16_t mss;
+ uint32_t rsvd2;
+ uint16_t inline_hdr_sz;
+};
+
+/* Regular WQE. */
+struct mlx5_wqe_regular {
+ union {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ uint32_t data[4];
+ } ctrl;
+ struct mlx5_wqe_eth_seg eseg;
+ struct mlx5_wqe_data_seg dseg;
+} __rte_aligned(64);
+
+/* Inline WQE. */
+struct mlx5_wqe_inl {
+ union {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ uint32_t data[4];
+ } ctrl;
+ struct mlx5_wqe_eth_seg eseg;
+ uint32_t byte_cnt;
+ uint8_t data[MLX5_WQE64_INL_DATA];
+} __rte_aligned(64);
+
+/* Multi-packet WQE. */
+struct mlx5_wqe_mpw {
+ union {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ uint32_t data[4];
+ } ctrl;
+ struct mlx5_wqe_eth_seg_small eseg;
+ struct mlx5_wqe_data_seg dseg[2];
+} __rte_aligned(64);
+
+/* Multi-packet WQE with inline. */
+struct mlx5_wqe_mpw_inl {
+ union {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ uint32_t data[4];
+ } ctrl;
+ struct mlx5_wqe_eth_seg_small eseg;
+ uint32_t byte_cnt;
+ uint8_t data[MLX5_MWQE64_INL_DATA];
+} __rte_aligned(64);
+
+/* Union of all WQE types. */
+union mlx5_wqe {
+ struct mlx5_wqe_regular wqe;
+ struct mlx5_wqe_inl inl;
+ struct mlx5_wqe_mpw mpw;
+ struct mlx5_wqe_mpw_inl mpw_inl;
+ uint8_t data[64];
+};
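These layouts rely on each variant occupying exactly 64 bytes; a
compile-time check one could add (a sketch, the header itself carries no
such assertion):

#include <rte_common.h>

static inline void
mlx5_wqe_size_check(void)
{
	RTE_BUILD_BUG_ON(sizeof(union mlx5_wqe) != 64);
}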
+
+/* MPW session status. */
+enum mlx5_mpw_state {
+ MLX5_MPW_STATE_OPENED,
+ MLX5_MPW_INL_STATE_OPENED,
+ MLX5_MPW_STATE_CLOSED,
+};
+
+/* MPW session descriptor. */
+struct mlx5_mpw {
+ enum mlx5_mpw_state state;
+ unsigned int pkts_n;
+ unsigned int len;
+ unsigned int total_len;
+ volatile union mlx5_wqe *wqe;
+ union {
+ volatile struct mlx5_wqe_data_seg *dseg[MLX5_MPW_DSEG_MAX];
+ volatile uint8_t *raw;
+ } data;
+};
+
+/* CQ element structure; its size should equal the cache line size. */
+struct mlx5_cqe {
+#if (RTE_CACHE_LINE_SIZE == 128)
+ uint8_t padding[64];
+#endif
+ struct mlx5_cqe64 cqe64;
+};
+
+#endif /* RTE_PMD_MLX5_PRM_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 3a55f633..8b585554 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -67,11 +67,9 @@ static const struct special_flow_init special_flow_init[] = {
1 << HASH_RXQ_TCPV4 |
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
-#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_TCPV6 |
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
-#endif /* HAVE_FLOW_SPEC_IPV6 */
1 << HASH_RXQ_ETH |
0,
.per_vlan = 0,
@@ -82,10 +80,8 @@ static const struct special_flow_init special_flow_init[] = {
.hash_types =
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
-#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
-#endif /* HAVE_FLOW_SPEC_IPV6 */
1 << HASH_RXQ_ETH |
0,
.per_vlan = 0,
@@ -96,15 +92,12 @@ static const struct special_flow_init special_flow_init[] = {
.hash_types =
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
-#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
-#endif /* HAVE_FLOW_SPEC_IPV6 */
1 << HASH_RXQ_ETH |
0,
.per_vlan = 1,
},
-#ifdef HAVE_FLOW_SPEC_IPV6
[HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
.dst_mac_val = "\x33\x33\x00\x00\x00\x00",
.dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
@@ -115,7 +108,6 @@ static const struct special_flow_init special_flow_init[] = {
0,
.per_vlan = 1,
},
-#endif /* HAVE_FLOW_SPEC_IPV6 */
};
/**
@@ -355,7 +347,9 @@ priv_special_flow_enable_all(struct priv *priv)
{
enum hash_rxq_flow_type flow_type;
- for (flow_type = 0; flow_type != HASH_RXQ_FLOW_TYPE_MAC; ++flow_type) {
+ for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
+ flow_type != HASH_RXQ_FLOW_TYPE_MAC;
+ ++flow_type) {
int ret;
ret = priv_special_flow_enable(priv, flow_type);
@@ -380,7 +374,9 @@ priv_special_flow_disable_all(struct priv *priv)
{
enum hash_rxq_flow_type flow_type;
- for (flow_type = 0; flow_type != HASH_RXQ_FLOW_TYPE_MAC; ++flow_type)
+ for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
+ flow_type != HASH_RXQ_FLOW_TYPE_MAC;
+ ++flow_type)
priv_special_flow_disable(priv, flow_type);
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index cbb017bb..29c137cd 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -43,6 +43,8 @@
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
+#include <infiniband/arch.h>
+#include <infiniband/mlx5_hw.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -105,7 +107,6 @@ const struct hash_rxq_init hash_rxq_init[] = {
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
},
-#ifdef HAVE_FLOW_SPEC_IPV6
[HASH_RXQ_TCPV6] = {
.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
IBV_EXP_RX_HASH_DST_IPV6 |
@@ -144,7 +145,6 @@ const struct hash_rxq_init hash_rxq_init[] = {
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
},
-#endif /* HAVE_FLOW_SPEC_IPV6 */
[HASH_RXQ_ETH] = {
.hash_fields = 0,
.dpdk_rss_hf = 0,
@@ -168,17 +168,11 @@ static const struct ind_table_init ind_table_init[] = {
1 << HASH_RXQ_TCPV4 |
1 << HASH_RXQ_UDPV4 |
1 << HASH_RXQ_IPV4 |
-#ifdef HAVE_FLOW_SPEC_IPV6
1 << HASH_RXQ_TCPV6 |
1 << HASH_RXQ_UDPV6 |
1 << HASH_RXQ_IPV6 |
-#endif /* HAVE_FLOW_SPEC_IPV6 */
0,
-#ifdef HAVE_FLOW_SPEC_IPV6
.hash_types_n = 6,
-#else /* HAVE_FLOW_SPEC_IPV6 */
- .hash_types_n = 3,
-#endif /* HAVE_FLOW_SPEC_IPV6 */
},
{
.max_size = 1,
@@ -243,12 +237,8 @@ priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
init = &hash_rxq_init[type];
*flow_attr = (struct ibv_exp_flow_attr){
.type = IBV_EXP_FLOW_ATTR_NORMAL,
-#ifdef MLX5_FDIR_SUPPORT
/* Priorities < 3 are reserved for flow director. */
.priority = init->flow_priority + 3,
-#else /* MLX5_FDIR_SUPPORT */
- .priority = init->flow_priority,
-#endif /* MLX5_FDIR_SUPPORT */
.num_of_specs = 0,
.port = priv->port,
.flags = 0,
@@ -279,7 +269,7 @@ priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
static enum hash_rxq_type
hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
{
- enum hash_rxq_type type = 0;
+ enum hash_rxq_type type = HASH_RXQ_TCPV4;
assert(pos < table->hash_types_n);
do {
@@ -385,8 +375,13 @@ priv_create_hash_rxqs(struct priv *priv)
DEBUG("indirection table extended to assume %u WQs",
priv->reta_idx_n);
}
- for (i = 0; (i != priv->reta_idx_n); ++i)
- wqs[i] = (*priv->rxqs)[(*priv->reta_idx)[i]]->wq;
+ for (i = 0; (i != priv->reta_idx_n); ++i) {
+ struct rxq_ctrl *rxq_ctrl;
+
+ rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
+ struct rxq_ctrl, rxq);
+ wqs[i] = rxq_ctrl->wq;
+ }
/* Get number of hash RX queues to configure. */
for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
hash_rxqs_n += ind_table_init[i].hash_types_n;
@@ -589,9 +584,7 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
case HASH_RXQ_FLOW_TYPE_ALLMULTI:
return !!priv->allmulti_req;
case HASH_RXQ_FLOW_TYPE_BROADCAST:
-#ifdef HAVE_FLOW_SPEC_IPV6
case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
-#endif /* HAVE_FLOW_SPEC_IPV6 */
/* If allmulti is enabled, broadcast and ipv6multi
* are unnecessary. */
return !priv->allmulti_req;
@@ -616,9 +609,11 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
int
priv_rehash_flows(struct priv *priv)
{
- unsigned int i;
+ enum hash_rxq_flow_type i;
- for (i = 0; (i != RTE_DIM((*priv->hash_rxqs)[0].special_flow)); ++i)
+ for (i = HASH_RXQ_FLOW_TYPE_PROMISC;
+ i != RTE_DIM((*priv->hash_rxqs)[0].special_flow);
+ ++i)
if (!priv_allow_flow_type(priv, i)) {
priv_special_flow_disable(priv, i);
} else {
@@ -634,148 +629,9 @@ priv_rehash_flows(struct priv *priv)
}
/**
- * Allocate RX queue elements with scattered packets support.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param elts_n
- * Number of elements to allocate.
- * @param[in] pool
- * If not NULL, fetch buffers from this array instead of allocating them
- * with rte_pktmbuf_alloc().
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n,
- struct rte_mbuf **pool)
-{
- unsigned int i;
- struct rxq_elt_sp (*elts)[elts_n] =
- rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
- rxq->socket);
- int ret = 0;
-
- if (elts == NULL) {
- ERROR("%p: can't allocate packets array", (void *)rxq);
- ret = ENOMEM;
- goto error;
- }
- /* For each WR (packet). */
- for (i = 0; (i != elts_n); ++i) {
- unsigned int j;
- struct rxq_elt_sp *elt = &(*elts)[i];
- struct ibv_sge (*sges)[RTE_DIM(elt->sges)] = &elt->sges;
-
- /* These two arrays must have the same size. */
- assert(RTE_DIM(elt->sges) == RTE_DIM(elt->bufs));
- /* For each SGE (segment). */
- for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
- struct ibv_sge *sge = &(*sges)[j];
- struct rte_mbuf *buf;
-
- if (pool != NULL) {
- buf = *(pool++);
- assert(buf != NULL);
- rte_pktmbuf_reset(buf);
- } else
- buf = rte_pktmbuf_alloc(rxq->mp);
- if (buf == NULL) {
- assert(pool == NULL);
- ERROR("%p: empty mbuf pool", (void *)rxq);
- ret = ENOMEM;
- goto error;
- }
- elt->bufs[j] = buf;
- /* Headroom is reserved by rte_pktmbuf_alloc(). */
- assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
- /* Buffer is supposed to be empty. */
- assert(rte_pktmbuf_data_len(buf) == 0);
- assert(rte_pktmbuf_pkt_len(buf) == 0);
- /* sge->addr must be able to store a pointer. */
- assert(sizeof(sge->addr) >= sizeof(uintptr_t));
- if (j == 0) {
- /* The first SGE keeps its headroom. */
- sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
- sge->length = (buf->buf_len -
- RTE_PKTMBUF_HEADROOM);
- } else {
- /* Subsequent SGEs lose theirs. */
- assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
- SET_DATA_OFF(buf, 0);
- sge->addr = (uintptr_t)buf->buf_addr;
- sge->length = buf->buf_len;
- }
- sge->lkey = rxq->mr->lkey;
- /* Redundant check for tailroom. */
- assert(sge->length == rte_pktmbuf_tailroom(buf));
- }
- }
- DEBUG("%p: allocated and configured %u WRs (%zu segments)",
- (void *)rxq, elts_n, (elts_n * RTE_DIM((*elts)[0].sges)));
- rxq->elts_n = elts_n;
- rxq->elts_head = 0;
- rxq->elts.sp = elts;
- assert(ret == 0);
- return 0;
-error:
- if (elts != NULL) {
- assert(pool == NULL);
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- unsigned int j;
- struct rxq_elt_sp *elt = &(*elts)[i];
-
- for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
- struct rte_mbuf *buf = elt->bufs[j];
-
- if (buf != NULL)
- rte_pktmbuf_free_seg(buf);
- }
- }
- rte_free(elts);
- }
- DEBUG("%p: failed, freed everything", (void *)rxq);
- assert(ret > 0);
- return ret;
-}
-
-/**
- * Free RX queue elements with scattered packets support.
- *
- * @param rxq
- * Pointer to RX queue structure.
- */
-static void
-rxq_free_elts_sp(struct rxq *rxq)
-{
- unsigned int i;
- unsigned int elts_n = rxq->elts_n;
- struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp;
-
- DEBUG("%p: freeing WRs", (void *)rxq);
- rxq->elts_n = 0;
- rxq->elts.sp = NULL;
- if (elts == NULL)
- return;
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- unsigned int j;
- struct rxq_elt_sp *elt = &(*elts)[i];
-
- for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
- struct rte_mbuf *buf = elt->bufs[j];
-
- if (buf != NULL)
- rte_pktmbuf_free_seg(buf);
- }
- }
- rte_free(elts);
-}
-
-/**
* Allocate RX queue elements.
*
- * @param rxq
+ * @param rxq_ctrl
* Pointer to RX queue structure.
* @param elts_n
* Number of elements to allocate.
@@ -787,73 +643,67 @@ rxq_free_elts_sp(struct rxq *rxq)
* 0 on success, errno value on failure.
*/
static int
-rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n, struct rte_mbuf **pool)
+rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
+ struct rte_mbuf *(*pool)[])
{
+ const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int i;
- struct rxq_elt (*elts)[elts_n] =
- rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0,
- rxq->socket);
int ret = 0;
- if (elts == NULL) {
- ERROR("%p: can't allocate packets array", (void *)rxq);
- ret = ENOMEM;
- goto error;
- }
- /* For each WR (packet). */
+ /* Iterate on segments. */
for (i = 0; (i != elts_n); ++i) {
- struct rxq_elt *elt = &(*elts)[i];
- struct ibv_sge *sge = &(*elts)[i].sge;
struct rte_mbuf *buf;
+ volatile struct mlx5_wqe_data_seg *scat =
+ &(*rxq_ctrl->rxq.wqes)[i];
if (pool != NULL) {
- buf = *(pool++);
+ buf = (*pool)[i];
assert(buf != NULL);
rte_pktmbuf_reset(buf);
+ rte_pktmbuf_refcnt_update(buf, 1);
} else
- buf = rte_pktmbuf_alloc(rxq->mp);
+ buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
assert(pool == NULL);
- ERROR("%p: empty mbuf pool", (void *)rxq);
+ ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
ret = ENOMEM;
goto error;
}
- elt->buf = buf;
/* Headroom is reserved by rte_pktmbuf_alloc(). */
assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
assert(rte_pktmbuf_data_len(buf) == 0);
assert(rte_pktmbuf_pkt_len(buf) == 0);
- /* sge->addr must be able to store a pointer. */
- assert(sizeof(sge->addr) >= sizeof(uintptr_t));
- /* SGE keeps its headroom. */
- sge->addr = (uintptr_t)
- ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM);
- sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM);
- sge->lkey = rxq->mr->lkey;
- /* Redundant check for tailroom. */
- assert(sge->length == rte_pktmbuf_tailroom(buf));
+ assert(!buf->next);
+ /* Only the first segment keeps headroom. */
+ if (i % sges_n)
+ SET_DATA_OFF(buf, 0);
+ PORT(buf) = rxq_ctrl->rxq.port_id;
+ DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
+ PKT_LEN(buf) = DATA_LEN(buf);
+ NB_SEGS(buf) = 1;
+ /* scat->addr must be able to store a pointer. */
+ assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+ *scat = (struct mlx5_wqe_data_seg){
+ .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
+ .byte_count = htonl(DATA_LEN(buf)),
+ .lkey = htonl(rxq_ctrl->mr->lkey),
+ };
+ (*rxq_ctrl->rxq.elts)[i] = buf;
}
- DEBUG("%p: allocated and configured %u single-segment WRs",
- (void *)rxq, elts_n);
- rxq->elts_n = elts_n;
- rxq->elts_head = 0;
- rxq->elts.no_sp = elts;
+ DEBUG("%p: allocated and configured %u segments (max %u packets)",
+ (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
assert(ret == 0);
return 0;
error:
- if (elts != NULL) {
- assert(pool == NULL);
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- struct rxq_elt *elt = &(*elts)[i];
- struct rte_mbuf *buf = elt->buf;
-
- if (buf != NULL)
- rte_pktmbuf_free_seg(buf);
- }
- rte_free(elts);
+ assert(pool == NULL);
+ elts_n = i;
+ for (i = 0; (i != elts_n); ++i) {
+ if ((*rxq_ctrl->rxq.elts)[i] != NULL)
+ rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
+ (*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DEBUG("%p: failed, freed everything", (void *)rxq);
+ DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
assert(ret > 0);
return ret;
}
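
Each ring entry now backs a single segment rather than a whole packet: a queue of elts_n entries holds elts_n >> sges_n packets, and only the entry that opens a packet keeps its headroom (the `i % sges_n` test above). A quick sketch with illustrative values:

    /* Illustrative: elts_n = 512, sges_n = 2 (4 segments per packet). */
    unsigned int elts_n = 512;
    unsigned int sges_n = 2;
    unsigned int pkts_max = elts_n >> sges_n;    /* 128 packets */
    /* Entries 0, 4, 8, ... keep RTE_PKTMBUF_HEADROOM; the other
     * three segments of each packet start at data offset 0. */
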
@@ -861,29 +711,23 @@ error:
/**
* Free RX queue elements.
*
- * @param rxq
+ * @param rxq_ctrl
* Pointer to RX queue structure.
*/
static void
-rxq_free_elts(struct rxq *rxq)
+rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
{
unsigned int i;
- unsigned int elts_n = rxq->elts_n;
- struct rxq_elt (*elts)[elts_n] = rxq->elts.no_sp;
- DEBUG("%p: freeing WRs", (void *)rxq);
- rxq->elts_n = 0;
- rxq->elts.no_sp = NULL;
- if (elts == NULL)
+ DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
+ if (rxq_ctrl->rxq.elts == NULL)
return;
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- struct rxq_elt *elt = &(*elts)[i];
- struct rte_mbuf *buf = elt->buf;
- if (buf != NULL)
- rte_pktmbuf_free_seg(buf);
+ for (i = 0; (i != rxq_ctrl->rxq.elts_n); ++i) {
+ if ((*rxq_ctrl->rxq.elts)[i] != NULL)
+ rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
+ (*rxq_ctrl->rxq.elts)[i] = NULL;
}
- rte_free(elts);
}
/**
@@ -891,65 +735,60 @@ rxq_free_elts(struct rxq *rxq)
*
* Destroy objects, free allocated memory and reset the structure for reuse.
*
- * @param rxq
+ * @param rxq_ctrl
* Pointer to RX queue structure.
*/
void
-rxq_cleanup(struct rxq *rxq)
+rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
{
struct ibv_exp_release_intf_params params;
- DEBUG("cleaning up %p", (void *)rxq);
- if (rxq->sp)
- rxq_free_elts_sp(rxq);
- else
- rxq_free_elts(rxq);
- rxq->poll = NULL;
- rxq->recv = NULL;
- if (rxq->if_wq != NULL) {
- assert(rxq->priv != NULL);
- assert(rxq->priv->ctx != NULL);
- assert(rxq->wq != NULL);
+ DEBUG("cleaning up %p", (void *)rxq_ctrl);
+ rxq_free_elts(rxq_ctrl);
+ if (rxq_ctrl->if_wq != NULL) {
+ assert(rxq_ctrl->priv != NULL);
+ assert(rxq_ctrl->priv->ctx != NULL);
+ assert(rxq_ctrl->wq != NULL);
params = (struct ibv_exp_release_intf_params){
.comp_mask = 0,
};
- claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
- rxq->if_wq,
+ claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
+ rxq_ctrl->if_wq,
&params));
}
- if (rxq->if_cq != NULL) {
- assert(rxq->priv != NULL);
- assert(rxq->priv->ctx != NULL);
- assert(rxq->cq != NULL);
+ if (rxq_ctrl->if_cq != NULL) {
+ assert(rxq_ctrl->priv != NULL);
+ assert(rxq_ctrl->priv->ctx != NULL);
+ assert(rxq_ctrl->cq != NULL);
params = (struct ibv_exp_release_intf_params){
.comp_mask = 0,
};
- claim_zero(ibv_exp_release_intf(rxq->priv->ctx,
- rxq->if_cq,
+ claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
+ rxq_ctrl->if_cq,
&params));
}
- if (rxq->wq != NULL)
- claim_zero(ibv_exp_destroy_wq(rxq->wq));
- if (rxq->cq != NULL)
- claim_zero(ibv_destroy_cq(rxq->cq));
- if (rxq->rd != NULL) {
+ if (rxq_ctrl->wq != NULL)
+ claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));
+ if (rxq_ctrl->cq != NULL)
+ claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
+ if (rxq_ctrl->rd != NULL) {
struct ibv_exp_destroy_res_domain_attr attr = {
.comp_mask = 0,
};
- assert(rxq->priv != NULL);
- assert(rxq->priv->ctx != NULL);
- claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
- rxq->rd,
+ assert(rxq_ctrl->priv != NULL);
+ assert(rxq_ctrl->priv->ctx != NULL);
+ claim_zero(ibv_exp_destroy_res_domain(rxq_ctrl->priv->ctx,
+ rxq_ctrl->rd,
&attr));
}
- if (rxq->mr != NULL)
- claim_zero(ibv_dereg_mr(rxq->mr));
- memset(rxq, 0, sizeof(*rxq));
+ if (rxq_ctrl->mr != NULL)
+ claim_zero(ibv_dereg_mr(rxq_ctrl->mr));
+ memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
}
/**
- * Reconfigure a RX queue with new parameters.
+ * Reconfigure RX queue buffers.
*
* rxq_rehash() does not allocate mbufs, which, if not done from the right
* thread (such as a control thread), may corrupt the pool.
@@ -957,173 +796,109 @@ rxq_cleanup(struct rxq *rxq)
*
* @param dev
* Pointer to Ethernet device structure.
- * @param rxq
+ * @param rxq_ctrl
* RX queue pointer.
*
* @return
* 0 on success, errno value on failure.
*/
int
-rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
+rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
{
- struct priv *priv = rxq->priv;
- struct rxq tmpl = *rxq;
- unsigned int mbuf_n;
- unsigned int desc_n;
- struct rte_mbuf **pool;
- unsigned int i, k;
+ unsigned int elts_n = rxq_ctrl->rxq.elts_n;
+ unsigned int i;
struct ibv_exp_wq_attr mod;
int err;
- DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq);
- /* Number of descriptors and mbufs currently allocated. */
- desc_n = (tmpl.elts_n * (tmpl.sp ? MLX5_PMD_SGE_WR_N : 1));
- mbuf_n = desc_n;
- /* Toggle RX checksum offload if hardware supports it. */
- if (priv->hw_csum) {
- tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- rxq->csum = tmpl.csum;
- }
- if (priv->hw_csum_l2tun) {
- tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- rxq->csum_l2tun = tmpl.csum_l2tun;
- }
- /* Enable scattered packets support for this queue if necessary. */
- if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
- tmpl.sp = 1;
- desc_n /= MLX5_PMD_SGE_WR_N;
- } else
- tmpl.sp = 0;
- DEBUG("%p: %s scattered packets support (%u WRs)",
- (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n);
- /* If scatter mode is the same as before, nothing to do. */
- if (tmpl.sp == rxq->sp) {
- DEBUG("%p: nothing to do", (void *)dev);
- return 0;
- }
+ DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
+ (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
+ assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
/* From now on, any failure will render the queue unusable.
* Reinitialize WQ. */
mod = (struct ibv_exp_wq_attr){
.attr_mask = IBV_EXP_WQ_ATTR_STATE,
.wq_state = IBV_EXP_WQS_RESET,
};
- err = ibv_exp_modify_wq(tmpl.wq, &mod);
+ err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
if (err) {
ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
assert(err > 0);
return err;
}
- /* Allocate pool. */
- pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0);
- if (pool == NULL) {
- ERROR("%p: cannot allocate memory", (void *)dev);
- return ENOBUFS;
- }
/* Snatch mbufs from original queue. */
- k = 0;
- if (rxq->sp) {
- struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
-
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- struct rxq_elt_sp *elt = &(*elts)[i];
- unsigned int j;
-
- for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) {
- assert(elt->bufs[j] != NULL);
- pool[k++] = elt->bufs[j];
- }
- }
- } else {
- struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
-
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- struct rxq_elt *elt = &(*elts)[i];
- struct rte_mbuf *buf = elt->buf;
+ claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
+ for (i = 0; i != elts_n; ++i) {
+ struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];
- pool[k++] = buf;
- }
+ assert(rte_mbuf_refcnt_read(buf) == 2);
+ rte_pktmbuf_free_seg(buf);
}
- assert(k == mbuf_n);
- tmpl.elts_n = 0;
- tmpl.elts.sp = NULL;
- assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp);
- err = ((tmpl.sp) ?
- rxq_alloc_elts_sp(&tmpl, desc_n, pool) :
- rxq_alloc_elts(&tmpl, desc_n, pool));
- if (err) {
- ERROR("%p: cannot reallocate WRs, aborting", (void *)dev);
- rte_free(pool);
- assert(err > 0);
- return err;
- }
- assert(tmpl.elts_n == desc_n);
- assert(tmpl.elts.sp != NULL);
- rte_free(pool);
- /* Clean up original data. */
- rxq->elts_n = 0;
- rte_free(rxq->elts.sp);
- rxq->elts.sp = NULL;
/* Change queue state to ready. */
mod = (struct ibv_exp_wq_attr){
.attr_mask = IBV_EXP_WQ_ATTR_STATE,
.wq_state = IBV_EXP_WQS_RDY,
};
- err = ibv_exp_modify_wq(tmpl.wq, &mod);
+ err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
if (err) {
ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
(void *)dev, strerror(err));
goto error;
}
- /* Post SGEs. */
- assert(tmpl.if_wq != NULL);
- if (tmpl.sp) {
- struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp;
-
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- err = tmpl.if_wq->recv_sg_list
- (tmpl.wq,
- (*elts)[i].sges,
- RTE_DIM((*elts)[i].sges));
- if (err)
- break;
- }
- } else {
- struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp;
-
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- err = tmpl.if_wq->recv_burst(
- tmpl.wq,
- &(*elts)[i].sge,
- 1);
- if (err)
- break;
- }
- }
- if (err) {
- ERROR("%p: failed to post SGEs with error %d",
- (void *)dev, err);
- /* Set err because it does not contain a valid errno value. */
- err = EIO;
- goto error;
- }
- if (tmpl.sp)
- tmpl.recv = tmpl.if_wq->recv_sg_list;
- else
- tmpl.recv = tmpl.if_wq->recv_burst;
+ /* Update doorbell counter. */
+ rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
+ rte_wmb();
+ *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
error:
- *rxq = tmpl;
assert(err >= 0);
return err;
}
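
Note how buffer reuse works here: rxq_alloc_elts() takes an extra reference on each recycled mbuf (rte_pktmbuf_refcnt_update(buf, 1)), so the rte_pktmbuf_free_seg() loop above only drops the old queue's reference instead of returning buffers to the pool. A minimal sketch of the invariant, assuming a freshly allocated mbuf starts at refcnt 1 and mp is a valid mbuf mempool:

    struct rte_mbuf *buf = rte_pktmbuf_alloc(mp); /* refcnt == 1 */
    rte_pktmbuf_refcnt_update(buf, 1);            /* refcnt == 2 */
    rte_pktmbuf_free_seg(buf);                    /* refcnt == 1, kept */
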
/**
+ * Initialize RX queue.
+ *
+ * @param tmpl
+ * Pointer to RX queue control template.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static inline int
+rxq_setup(struct rxq_ctrl *tmpl)
+{
+ struct ibv_cq *ibcq = tmpl->cq;
+ struct mlx5_cq *cq = to_mxxx(cq, cq);
+ struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
+ struct rte_mbuf *(*elts)[tmpl->rxq.elts_n] =
+ rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
+
+ if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+ ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
+ "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ return EINVAL;
+ }
+ if (elts == NULL)
+ return ENOMEM;
+ tmpl->rxq.rq_db = rwq->rq.db;
+ tmpl->rxq.cqe_n = ibcq->cqe + 1;
+ tmpl->rxq.cq_ci = 0;
+ tmpl->rxq.rq_ci = 0;
+ tmpl->rxq.cq_db = cq->dbrec;
+ tmpl->rxq.wqes =
+ (volatile struct mlx5_wqe_data_seg (*)[])
+ (uintptr_t)rwq->rq.buff;
+ tmpl->rxq.cqes =
+ (volatile struct mlx5_cqe (*)[])
+ (uintptr_t)cq->active_buf->buf;
+ tmpl->rxq.elts = elts;
+ return 0;
+}
+
+/**
* Configure a RX queue.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param rxq
+ * @param rxq_ctrl
* Pointer to RX queue structure.
* @param desc
* Number of descriptors to configure in queue.
@@ -1138,15 +913,18 @@ error:
* 0 on success, errno value on failure.
*/
int
-rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
struct priv *priv = dev->data->dev_private;
- struct rxq tmpl = {
+ struct rxq_ctrl tmpl = {
.priv = priv,
- .mp = mp,
- .socket = socket
+ .socket = socket,
+ .rxq = {
+ .elts_n = desc,
+ .mp = mp,
+ },
};
struct ibv_exp_wq_attr mod;
union {
@@ -1154,44 +932,59 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
struct ibv_exp_cq_init_attr cq;
struct ibv_exp_res_domain_init_attr rd;
struct ibv_exp_wq_init_attr wq;
+ struct ibv_exp_cq_attr cq_attr;
} attr;
enum ibv_exp_query_intf_status status;
- struct rte_mbuf *buf;
+ unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ unsigned int cqe_n = desc - 1;
+ struct rte_mbuf *(*elts)[desc] = NULL;
int ret = 0;
- unsigned int i;
- unsigned int cq_size = desc;
(void)conf; /* Thresholds configuration (ignored). */
- if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) {
- ERROR("%p: invalid number of RX descriptors (must be a"
- " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N);
- return EINVAL;
+ /* Enable scattered packets support for this queue if necessary. */
+ assert(mb_len >= RTE_PKTMBUF_HEADROOM);
+ if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ (mb_len - RTE_PKTMBUF_HEADROOM))) {
+ unsigned int size =
+ RTE_PKTMBUF_HEADROOM +
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int sges_n;
+
+ /*
+ * Determine the number of SGEs needed for a full packet
+ * and round it to the next power of two.
+ */
+ sges_n = log2above((size / mb_len) + !!(size % mb_len));
+ tmpl.rxq.sges_n = sges_n;
+ /* Make sure rxq.sges_n did not overflow. */
+ size = mb_len * (1 << tmpl.rxq.sges_n);
+ size -= RTE_PKTMBUF_HEADROOM;
+ if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ ERROR("%p: too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u",
+ (void *)dev,
+ 1 << sges_n,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ return EOVERFLOW;
+ }
}
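
The rounding above can be checked in isolation. A standalone sketch, assuming log2above() mirrors the PMD helper (ceiling of log2) and using illustrative sizes:

    #include <stdio.h>

    /* Assumed equivalent of the PMD's log2above(). */
    static unsigned int
    log2above(unsigned int v)
    {
        unsigned int l;
        unsigned int r;

        for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
            r |= (v & 1);
        return l + r;
    }

    int
    main(void)
    {
        unsigned int mb_len = 2048;               /* mempool data room */
        unsigned int max_rx_pkt_len = 9000;       /* jumbo frames */
        unsigned int size = 128 + max_rx_pkt_len; /* 128: headroom, illustrative */
        unsigned int sges_n = log2above((size / mb_len) + !!(size % mb_len));

        /* 9128 bytes need 5 buffers, rounded up to 8 (2^3). */
        printf("sges_n=%u -> %u SGEs per packet\n", sges_n, 1u << sges_n);
        return 0;
    }
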
- /* Get mbuf length. */
- buf = rte_pktmbuf_alloc(mp);
- if (buf == NULL) {
- ERROR("%p: unable to allocate mbuf", (void *)dev);
- return ENOMEM;
+ DEBUG("%p: maximum number of segments per packet: %u",
+ (void *)dev, 1 << tmpl.rxq.sges_n);
+ if (desc % (1 << tmpl.rxq.sges_n)) {
+ ERROR("%p: number of RX queue descriptors (%u) is not a"
+ " multiple of SGEs per packet (%u)",
+ (void *)dev,
+ desc,
+ 1 << tmpl.rxq.sges_n);
+ return EINVAL;
}
- tmpl.mb_len = buf->buf_len;
- assert((rte_pktmbuf_headroom(buf) +
- rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
- assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
- rte_pktmbuf_free(buf);
/* Toggle RX checksum offload if hardware supports it. */
if (priv->hw_csum)
- tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ tmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
if (priv->hw_csum_l2tun)
- tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
- /* Enable scattered packets support for this queue if necessary. */
- if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) {
- tmpl.sp = 1;
- desc /= MLX5_PMD_SGE_WR_N;
- }
- DEBUG("%p: %s scattered packets support (%u WRs)",
- (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc);
+ tmpl.rxq.csum_l2tun =
+ !!dev->data->dev_conf.rxmode.hw_ip_checksum;
/* Use the entire RX mempool as the memory region. */
tmpl.mr = mlx5_mp2mr(priv->pd, mp);
if (tmpl.mr == NULL) {
@@ -1217,7 +1010,12 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
.res_domain = tmpl.rd,
};
- tmpl.cq = ibv_exp_create_cq(priv->ctx, cq_size, NULL, NULL, 0,
+ if (priv->cqe_comp) {
+ attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
+ attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
+ cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
+ }
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, NULL, 0,
&attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
@@ -1230,64 +1028,51 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.max_sge);
/* Configure VLAN stripping. */
- tmpl.vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip;
+ tmpl.rxq.vlan_strip = (priv->hw_vlan_strip &&
+ !!dev->data->dev_conf.rxmode.hw_vlan_strip);
attr.wq = (struct ibv_exp_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_EXP_WQT_RQ,
/* Max number of outstanding WRs. */
- .max_recv_wr = ((priv->device_attr.max_qp_wr < (int)cq_size) ?
- priv->device_attr.max_qp_wr :
- (int)cq_size),
+ .max_recv_wr = desc >> tmpl.rxq.sges_n,
/* Max number of scatter/gather elements in a WR. */
- .max_recv_sge = ((priv->device_attr.max_sge <
- MLX5_PMD_SGE_WR_N) ?
- priv->device_attr.max_sge :
- MLX5_PMD_SGE_WR_N),
+ .max_recv_sge = 1 << tmpl.rxq.sges_n,
.pd = priv->pd,
.cq = tmpl.cq,
.comp_mask =
IBV_EXP_CREATE_WQ_RES_DOMAIN |
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
0,
.res_domain = tmpl.rd,
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
- .vlan_offloads = (tmpl.vlan_strip ?
+ .vlan_offloads = (tmpl.rxq.vlan_strip ?
IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
0),
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
};
-
-#ifdef HAVE_VERBS_FCS
/* By default, FCS (CRC) is stripped by hardware. */
if (dev->data->dev_conf.rxmode.hw_strip_crc) {
- tmpl.crc_present = 0;
+ tmpl.rxq.crc_present = 0;
} else if (priv->hw_fcs_strip) {
/* Ask HW/Verbs to leave CRC in place when supported. */
attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS;
attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;
- tmpl.crc_present = 1;
+ tmpl.rxq.crc_present = 1;
} else {
WARN("%p: CRC stripping has been disabled but will still"
" be performed by hardware, make sure MLNX_OFED and"
" firmware are up to date",
(void *)dev);
- tmpl.crc_present = 0;
+ tmpl.rxq.crc_present = 0;
}
DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
" incoming frames to hide it",
(void *)dev,
- tmpl.crc_present ? "disabled" : "enabled",
- tmpl.crc_present << 2);
-#endif /* HAVE_VERBS_FCS */
-
-#ifdef HAVE_VERBS_RX_END_PADDING
+ tmpl.rxq.crc_present ? "disabled" : "enabled",
+ tmpl.rxq.crc_present << 2);
if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING"))
; /* Nothing else to do. */
else if (priv->hw_padding) {
INFO("%p: enabling packet padding on queue %p",
- (void *)dev, (void *)rxq);
+ (void *)dev, (void *)rxq_ctrl);
attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING;
attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS;
} else
@@ -1295,7 +1080,6 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
" supported, make sure MLNX_OFED and firmware are"
" up to date",
(void *)dev);
-#endif /* HAVE_VERBS_RX_END_PADDING */
tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
if (tmpl.wq == NULL) {
@@ -1304,23 +1088,25 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
(void *)dev, strerror(ret));
goto error;
}
- if (tmpl.sp)
- ret = rxq_alloc_elts_sp(&tmpl, desc, NULL);
- else
- ret = rxq_alloc_elts(&tmpl, desc, NULL);
- if (ret) {
- ERROR("%p: RXQ allocation failed: %s",
- (void *)dev, strerror(ret));
+ /*
+	 * Make sure the number of WRs*SGEs matches expectations since a queue
+ * cannot allocate more than "desc" buffers.
+ */
+ if (((int)attr.wq.max_recv_wr != (desc >> tmpl.rxq.sges_n)) ||
+ ((int)attr.wq.max_recv_sge != (1 << tmpl.rxq.sges_n))) {
+ ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
+ (void *)dev,
+ (desc >> tmpl.rxq.sges_n), (1 << tmpl.rxq.sges_n),
+ attr.wq.max_recv_wr, attr.wq.max_recv_sge);
+ ret = EINVAL;
goto error;
}
/* Save port ID. */
- tmpl.port_id = dev->data->port_id;
- DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
+ tmpl.rxq.port_id = dev->data->port_id;
+ DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
attr.params = (struct ibv_exp_query_intf_params){
.intf_scope = IBV_EXP_INTF_GLOBAL,
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
.intf_version = 1,
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
.intf = IBV_EXP_INTF_CQ,
.obj = tmpl.cq,
};
@@ -1352,56 +1138,47 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
(void *)dev, strerror(ret));
goto error;
}
- /* Post SGEs. */
- if (tmpl.sp) {
- struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp;
-
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- ret = tmpl.if_wq->recv_sg_list
- (tmpl.wq,
- (*elts)[i].sges,
- RTE_DIM((*elts)[i].sges));
- if (ret)
- break;
- }
- } else {
- struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp;
-
- for (i = 0; (i != RTE_DIM(*elts)); ++i) {
- ret = tmpl.if_wq->recv_burst(
- tmpl.wq,
- &(*elts)[i].sge,
- 1);
- if (ret)
- break;
- }
+ ret = rxq_setup(&tmpl);
+ if (ret) {
+ ERROR("%p: cannot initialize RX queue structure: %s",
+ (void *)dev, strerror(ret));
+ goto error;
}
+ /* Reuse buffers from original queue if possible. */
+ if (rxq_ctrl->rxq.elts_n) {
+ assert(rxq_ctrl->rxq.elts_n == desc);
+ assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
+ ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
+ } else
+ ret = rxq_alloc_elts(&tmpl, desc, NULL);
if (ret) {
- ERROR("%p: failed to post SGEs with error %d",
- (void *)dev, ret);
- /* Set ret because it does not contain a valid errno value. */
- ret = EIO;
+ ERROR("%p: RXQ allocation failed: %s",
+ (void *)dev, strerror(ret));
goto error;
}
/* Clean up rxq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
- rxq_cleanup(rxq);
- *rxq = tmpl;
- DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
+ DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl);
+ rxq_cleanup(rxq_ctrl);
+ /* Move mbuf pointers to dedicated storage area in RX queue. */
+ elts = (void *)(rxq_ctrl + 1);
+ rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));
+#ifndef NDEBUG
+ memset(tmpl.rxq.elts, 0x55, sizeof(*elts));
+#endif
+ rte_free(tmpl.rxq.elts);
+ tmpl.rxq.elts = elts;
+ *rxq_ctrl = tmpl;
+ /* Update doorbell counter. */
+ rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
+ rte_wmb();
+ *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
+ DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
assert(ret == 0);
- /* Assign function in queue. */
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
- rxq->poll = rxq->if_cq->poll_length_flags_cvlan;
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
- rxq->poll = rxq->if_cq->poll_length_flags;
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
- if (rxq->sp)
- rxq->recv = rxq->if_wq->recv_sg_list;
- else
- rxq->recv = rxq->if_wq->recv_burst;
return 0;
error:
+ elts = tmpl.rxq.elts;
rxq_cleanup(&tmpl);
+ rte_free(elts);
assert(ret > 0);
return ret;
}
@@ -1432,12 +1209,19 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
{
struct priv *priv = dev->data->dev_private;
struct rxq *rxq = (*priv->rxqs)[idx];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
int ret;
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
priv_lock(priv);
+ if (!rte_is_power_of_2(desc)) {
+ desc = 1 << log2above(desc);
+ WARN("%p: increased number of descriptors in RX queue %u"
+ " to the next power of two (%d)",
+ (void *)dev, idx, desc);
+ }
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
if (idx >= priv->rxqs_n) {
@@ -1454,29 +1238,28 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return -EEXIST;
}
(*priv->rxqs)[idx] = NULL;
- rxq_cleanup(rxq);
+ rxq_cleanup(rxq_ctrl);
} else {
- rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
- if (rxq == NULL) {
+ rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (rxq_ctrl == NULL) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
priv_unlock(priv);
return -ENOMEM;
}
}
- ret = rxq_setup(dev, rxq, desc, socket, conf, mp);
+ ret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);
if (ret)
- rte_free(rxq);
+ rte_free(rxq_ctrl);
else {
- rxq->stats.idx = idx;
+ rxq_ctrl->rxq.stats.idx = idx;
DEBUG("%p: adding RX queue %p to list",
- (void *)dev, (void *)rxq);
- (*priv->rxqs)[idx] = rxq;
+ (void *)dev, (void *)rxq_ctrl);
+ (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
/* Update receive callback. */
- if (rxq->sp)
- dev->rx_pkt_burst = mlx5_rx_burst_sp;
- else
- dev->rx_pkt_burst = mlx5_rx_burst;
+ dev->rx_pkt_burst = mlx5_rx_burst;
}
priv_unlock(priv);
return -ret;
@@ -1492,6 +1275,7 @@ void
mlx5_rx_queue_release(void *dpdk_rxq)
{
struct rxq *rxq = (struct rxq *)dpdk_rxq;
+ struct rxq_ctrl *rxq_ctrl;
struct priv *priv;
unsigned int i;
@@ -1500,17 +1284,18 @@ mlx5_rx_queue_release(void *dpdk_rxq)
if (rxq == NULL)
return;
- priv = rxq->priv;
+ rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ priv = rxq_ctrl->priv;
priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] == rxq) {
DEBUG("%p: removing RX queue %p from list",
- (void *)priv->dev, (void *)rxq);
+ (void *)priv->dev, (void *)rxq_ctrl);
(*priv->rxqs)[i] = NULL;
break;
}
- rxq_cleanup(rxq);
- rte_free(rxq);
+ rxq_cleanup(rxq_ctrl);
+ rte_free(rxq_ctrl);
priv_unlock(priv);
}
@@ -1535,7 +1320,8 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n)
{
struct rxq *rxq = dpdk_rxq;
- struct priv *priv = mlx5_secondary_data_setup(rxq->priv);
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct priv *priv = mlx5_secondary_data_setup(rxq_ctrl->priv);
struct priv *primary_priv;
unsigned int index;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 9d1380a0..0c352f3f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -42,6 +42,8 @@
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
+#include <infiniband/mlx5_hw.h>
+#include <infiniband/arch.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -55,7 +57,7 @@
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
-#include <rte_memory.h>
+#include <rte_ether.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -65,125 +67,161 @@
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+
+#ifndef NDEBUG
/**
- * Manage TX completions.
- *
- * When sending a burst, mlx5_tx_burst() posts several WRs.
- * To improve performance, a completion event is only required once every
- * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
- * for other WRs, but this information would not be used anyway.
+ * Verify or set magic value in CQE.
*
- * @param txq
- * Pointer to TX queue structure.
+ * @param cqe
+ * Pointer to CQE.
*
* @return
- * 0 on success, -1 on failure.
+ *   0 the first time, 1 afterwards.
*/
-static int
-txq_complete(struct txq *txq)
+static inline int
+check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
{
- unsigned int elts_comp = txq->elts_comp;
- unsigned int elts_tail = txq->elts_tail;
- unsigned int elts_free = txq->elts_tail;
- const unsigned int elts_n = txq->elts_n;
- int wcs_n;
+ static const uint8_t magic[] = "seen";
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd40)] = &cqe->rsvd40;
+ int ret = 1;
+ unsigned int i;
- if (unlikely(elts_comp == 0))
- return 0;
-#ifdef DEBUG_SEND
- DEBUG("%p: processing %u work requests completions",
- (void *)txq, elts_comp);
-#endif
- wcs_n = txq->poll_cnt(txq->cq, elts_comp);
- if (unlikely(wcs_n == 0))
- return 0;
- if (unlikely(wcs_n < 0)) {
- DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
- (void *)txq, wcs_n);
- return -1;
- }
- elts_comp -= wcs_n;
- assert(elts_comp <= txq->elts_comp);
- /*
- * Assume WC status is successful as nothing can be done about it
- * anyway.
- */
- elts_tail += wcs_n * txq->elts_comp_cd_init;
- if (elts_tail >= elts_n)
- elts_tail -= elts_n;
+ for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i)
+ if (!ret || (*buf)[i] != magic[i]) {
+ ret = 0;
+ (*buf)[i] = magic[i];
+ }
+ return ret;
+}
- while (elts_free != elts_tail) {
- struct txq_elt *elt = &(*txq->elts)[elts_free];
- unsigned int elts_free_next =
- (((elts_free + 1) == elts_n) ? 0 : elts_free + 1);
- struct rte_mbuf *tmp = elt->buf;
- struct txq_elt *elt_next = &(*txq->elts)[elts_free_next];
+#endif /* NDEBUG */
+
+static inline int
+check_cqe64(volatile struct mlx5_cqe64 *cqe,
+ unsigned int cqes_n, const uint16_t ci)
+ __attribute__((always_inline));
+
+/**
+ * Check whether CQE is valid.
+ *
+ * @param cqe
+ * Pointer to CQE.
+ * @param cqes_n
+ * Size of completion queue.
+ * @param ci
+ * Consumer index.
+ *
+ * @return
+ * 0 on success, 1 on failure.
+ */
+static inline int
+check_cqe64(volatile struct mlx5_cqe64 *cqe,
+ unsigned int cqes_n, const uint16_t ci)
+{
+ uint16_t idx = ci & cqes_n;
+ uint8_t op_own = cqe->op_own;
+ uint8_t op_owner = MLX5_CQE_OWNER(op_own);
+ uint8_t op_code = MLX5_CQE_OPCODE(op_own);
+ if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID)))
+ return 1; /* No CQE. */
#ifndef NDEBUG
- /* Poisoning. */
- memset(elt, 0x66, sizeof(*elt));
-#endif
- RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
- /* Faster than rte_pktmbuf_free(). */
- do {
- struct rte_mbuf *next = NEXT(tmp);
+ if ((op_code == MLX5_CQE_RESP_ERR) ||
+ (op_code == MLX5_CQE_REQ_ERR)) {
+ volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe;
+ uint8_t syndrome = err_cqe->syndrome;
- rte_pktmbuf_free_seg(tmp);
- tmp = next;
- } while (tmp != NULL);
- elts_free = elts_free_next;
+ if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
+ (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
+ return 0;
+ if (!check_cqe64_seen(cqe))
+ ERROR("unexpected CQE error %u (0x%02x)"
+ " syndrome 0x%02x",
+ op_code, op_code, syndrome);
+ return 1;
+ } else if ((op_code != MLX5_CQE_RESP_SEND) &&
+ (op_code != MLX5_CQE_REQ)) {
+ if (!check_cqe64_seen(cqe))
+ ERROR("unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
+ return 1;
}
-
- txq->elts_tail = elts_tail;
- txq->elts_comp = elts_comp;
+#endif /* NDEBUG */
return 0;
}
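
The ownership test works because cqes_n here is the power-of-two ring size: `ci & cqes_n` isolates the wrap-parity bit, which the hardware toggles in op_own on every pass over the ring. A minimal illustration:

    #include <stdint.h>

    /* Expected CQE owner bit for consumer index ci on a 2^n-entry CQ. */
    static inline int
    expected_owner(uint16_t ci, unsigned int cqes_n)
    {
        return !!(ci & cqes_n);
    }
    /* cqes_n = 256: ci 0..255 -> 0, ci 256..511 -> 1, ci 512 -> 0 ... */
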
-/* For best performance, this function should not be inlined. */
-struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *)
- __attribute__((noinline));
-
/**
- * Register mempool as a memory region.
+ * Manage TX completions.
*
- * @param pd
- * Pointer to protection domain.
- * @param mp
- * Pointer to memory pool.
+ * When sending a burst, mlx5_tx_burst() posts several WRs.
*
- * @return
- * Memory region pointer, NULL in case of error.
+ * @param txq
+ * Pointer to TX queue structure.
*/
-struct ibv_mr *
-mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
+static void
+txq_complete(struct txq *txq)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start = mp->elt_va_start;
- uintptr_t end = mp->elt_va_end;
- unsigned int i;
+ const unsigned int elts_n = txq->elts_n;
+ const unsigned int cqe_n = txq->cqe_n;
+ const unsigned int cqe_cnt = cqe_n - 1;
+ uint16_t elts_free = txq->elts_tail;
+ uint16_t elts_tail;
+ uint16_t cq_ci = txq->cq_ci;
+ volatile struct mlx5_cqe64 *cqe = NULL;
+ volatile union mlx5_wqe *wqe;
+
+ do {
+ volatile struct mlx5_cqe64 *tmp;
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (const void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- /* Round start and end to page boundary if found in memory segments. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
-
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
+ tmp = &(*txq->cqes)[cq_ci & cqe_cnt].cqe64;
+ if (check_cqe64(tmp, cqe_n, cq_ci))
+ break;
+ cqe = tmp;
+#ifndef NDEBUG
+ if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) {
+ if (!check_cqe64_seen(cqe))
+ ERROR("unexpected compressed CQE, TX stopped");
+ return;
+ }
+ if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
+ (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
+ if (!check_cqe64_seen(cqe))
+ ERROR("unexpected error CQE, TX stopped");
+ return;
+ }
+#endif /* NDEBUG */
+ ++cq_ci;
+ } while (1);
+ if (unlikely(cqe == NULL))
+ return;
+ wqe = &(*txq->wqes)[htons(cqe->wqe_counter) & (txq->wqe_n - 1)];
+ elts_tail = wqe->wqe.ctrl.data[3];
+ assert(elts_tail < txq->wqe_n);
+ /* Free buffers. */
+ while (elts_free != elts_tail) {
+ struct rte_mbuf *elt = (*txq->elts)[elts_free];
+ unsigned int elts_free_next =
+ (elts_free + 1) & (elts_n - 1);
+ struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next];
+
+#ifndef NDEBUG
+ /* Poisoning. */
+ memset(&(*txq->elts)[elts_free],
+ 0x66,
+ sizeof((*txq->elts)[elts_free]));
+#endif
+ RTE_MBUF_PREFETCH_TO_FREE(elt_next);
+ /* Only one segment needs to be freed. */
+ rte_pktmbuf_free_seg(elt);
+ elts_free = elts_free_next;
}
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (const void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- return ibv_reg_mr(pd,
- (void *)start,
- end - start,
- IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
+ txq->cq_ci = cq_ci;
+ txq->elts_tail = elts_tail;
+ /* Update the consumer index. */
+ rte_wmb();
+ *txq->cq_db = htonl(cq_ci);
}
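
Completions are no longer polled through verbs; instead the send side stashes its ring position in the otherwise unused ctrl.data[3] field of each WQE that requests a completion (see mlx5_tx_burst() below), and the CQE's wqe_counter leads back to that WQE. The round trip, condensed from this patch:

    /* Send side (mlx5_tx_burst), when the completion threshold hits:
     *     wqe->wqe.ctrl.data[2] = htonl(8);   -- request completion
     *     wqe->wqe.ctrl.data[3] = elts_head;  -- stash ring position
     * Completion side (txq_complete):
     *     wqe = &(*txq->wqes)[htons(cqe->wqe_counter) & (txq->wqe_n - 1)];
     *     elts_tail = wqe->wqe.ctrl.data[3];  -- recover ring position
     * A single CQE therefore releases every buffer up to elts_tail. */
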
/**
@@ -204,6 +242,10 @@ txq_mb2mp(struct rte_mbuf *buf)
return buf->pool;
}
+static inline uint32_t
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+ __attribute__((always_inline));
+
/**
* Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
* Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
@@ -217,11 +259,11 @@ txq_mb2mp(struct rte_mbuf *buf)
* @return
* mr->lkey on success, (uint32_t)-1 on failure.
*/
-static uint32_t
-txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
+static inline uint32_t
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
unsigned int i;
- struct ibv_mr *mr;
+ uint32_t lkey = (uint32_t)-1;
for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
if (unlikely(txq->mp2mr[i].mp == NULL)) {
@@ -230,295 +272,681 @@ txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
}
if (txq->mp2mr[i].mp == mp) {
assert(txq->mp2mr[i].lkey != (uint32_t)-1);
- assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
- return txq->mp2mr[i].lkey;
+ assert(htonl(txq->mp2mr[i].mr->lkey) ==
+ txq->mp2mr[i].lkey);
+ lkey = txq->mp2mr[i].lkey;
+ break;
}
}
- /* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq, mp->name, (const void *)mp);
- mr = mlx5_mp2mr(txq->priv->pd, mp);
- if (unlikely(mr == NULL)) {
- DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
- (void *)txq);
- return (uint32_t)-1;
- }
- if (unlikely(i == RTE_DIM(txq->mp2mr))) {
- /* Table is full, remove oldest entry. */
- DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
- (void *)txq);
- --i;
- claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
- memmove(&txq->mp2mr[0], &txq->mp2mr[1],
- (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
- }
- /* Store the new entry. */
- txq->mp2mr[i].mp = mp;
- txq->mp2mr[i].mr = mr;
- txq->mp2mr[i].lkey = mr->lkey;
- DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
- return txq->mp2mr[i].lkey;
+ if (unlikely(lkey == (uint32_t)-1))
+ lkey = txq_mp2mr_reg(txq, mp, i);
+ return lkey;
}
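
The registration slow path now lives in txq_mp2mr_reg() (added elsewhere in this series), leaving only a short linear scan on the hot path. As the assert above implies, the cached lkey is stored pre-swapped to network byte order, which is why call sites below assign it straight into descriptor fields:

    /* As used in the segment loop of mlx5_tx_burst() below: */
    dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf));
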
-struct txq_mp2mr_mbuf_check_data {
- const struct rte_mempool *mp;
- int ret;
-};
-
/**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like a mbuf.
- *
- * @param[in, out] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer
- * and return value.
- * @param[in] start
- * Object start address.
- * @param[in] end
- * Object end address.
- * @param index
- * Unused.
+ * Write a regular WQE.
*
- * @return
- * Nonzero value when object is not a mbuf.
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the WQE to fill.
+ * @param addr
+ * Buffer data address.
+ * @param length
+ * Packet length.
+ * @param lkey
+ * Memory region lkey.
*/
-static void
-txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
- uint32_t index __rte_unused)
+static inline void
+mlx5_wqe_write(struct txq *txq, volatile union mlx5_wqe *wqe,
+ uintptr_t addr, uint32_t length, uint32_t lkey)
{
- struct txq_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf =
- (void *)((uintptr_t)start + data->mp->header_size);
-
- (void)index;
- /* Check whether mbuf structure fits element size and whether mempool
- * pointer is valid. */
- if (((uintptr_t)end >= (uintptr_t)(buf + 1)) &&
- (buf->pool == data->mp))
- data->ret = 0;
- else
- data->ret = -1;
+ wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
+ wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4);
+ wqe->wqe.ctrl.data[2] = 0;
+ wqe->wqe.ctrl.data[3] = 0;
+ wqe->inl.eseg.rsvd0 = 0;
+ wqe->inl.eseg.rsvd1 = 0;
+ wqe->inl.eseg.mss = 0;
+ wqe->inl.eseg.rsvd2 = 0;
+ wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE);
+ /* Copy the first 16 bytes into inline header. */
+ rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start,
+ (uint8_t *)(uintptr_t)addr,
+ MLX5_ETH_INLINE_HEADER_SIZE);
+ addr += MLX5_ETH_INLINE_HEADER_SIZE;
+ length -= MLX5_ETH_INLINE_HEADER_SIZE;
+ /* Store remaining data in data segment. */
+ wqe->wqe.dseg.byte_count = htonl(length);
+ wqe->wqe.dseg.lkey = lkey;
+ wqe->wqe.dseg.addr = htonll(addr);
+ /* Increment consumer index. */
+ ++txq->wqe_ci;
}
/**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a TX queue.
+ * Write a regular WQE with VLAN.
*
- * @param[in] mp
- * Memory Pool to register.
- * @param *arg
+ * @param txq
* Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the WQE to fill.
+ * @param addr
+ * Buffer data address.
+ * @param length
+ * Packet length.
+ * @param lkey
+ * Memory region lkey.
+ * @param vlan_tci
+ * VLAN field to insert in packet.
*/
-void
-txq_mp2mr_iter(const struct rte_mempool *mp, void *arg)
+static inline void
+mlx5_wqe_write_vlan(struct txq *txq, volatile union mlx5_wqe *wqe,
+ uintptr_t addr, uint32_t length, uint32_t lkey,
+ uint16_t vlan_tci)
{
- struct txq *txq = arg;
- struct txq_mp2mr_mbuf_check_data data = {
- .mp = mp,
- .ret = -1,
- };
+ uint32_t vlan = htonl(0x81000000 | vlan_tci);
- /* Discard empty mempools. */
- if (mp->size == 0)
- return;
- /* Register mempool only if the first element looks like a mbuf. */
- rte_mempool_obj_iter((void *)mp->elt_va_start,
- 1,
- mp->header_size + mp->elt_size + mp->trailer_size,
- 1,
- mp->elt_pa,
- mp->pg_num,
- mp->pg_shift,
- txq_mp2mr_mbuf_check,
- &data);
- if (data.ret)
- return;
- txq_mp2mr(txq, mp);
+ wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
+ wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4);
+ wqe->wqe.ctrl.data[2] = 0;
+ wqe->wqe.ctrl.data[3] = 0;
+ wqe->inl.eseg.rsvd0 = 0;
+ wqe->inl.eseg.rsvd1 = 0;
+ wqe->inl.eseg.mss = 0;
+ wqe->inl.eseg.rsvd2 = 0;
+ wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE);
+ /*
+ * Copy 12 bytes of source & destination MAC address.
+ * Copy 4 bytes of VLAN.
+ * Copy 2 bytes of Ether type.
+ */
+ rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start,
+ (uint8_t *)(uintptr_t)addr, 12);
+ rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 12),
+ &vlan, sizeof(vlan));
+ rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 16),
+ (uint8_t *)((uintptr_t)addr + 12), 2);
+ addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
+ length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
+ /* Store remaining data in data segment. */
+ wqe->wqe.dseg.byte_count = htonl(length);
+ wqe->wqe.dseg.lkey = lkey;
+ wqe->wqe.dseg.addr = htonll(addr);
+ /* Increment consumer index. */
+ ++txq->wqe_ci;
}
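
The three copies splice the 802.1Q tag into the inline header without touching the mbuf. Assuming MLX5_ETH_VLAN_INLINE_HEADER_SIZE is 18, the resulting layout is:

    /* inline_hdr_start after the copies above:
     *   bytes  0..11 : destination + source MAC (frame bytes 0..11)
     *   bytes 12..15 : htonl(0x81000000 | vlan_tci) -- TPID 0x8100 + TCI
     *   bytes 16..17 : original EtherType           (frame bytes 12..13)
     * addr/length then advance past the 14 consumed frame bytes
     * (18 header bytes minus the 4-byte tag synthesized from vlan_tci). */
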
/**
- * Insert VLAN using mbuf headroom space.
+ * Write an inline WQE.
*
- * @param buf
- * Buffer for VLAN insertion.
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the WQE to fill.
+ * @param addr
+ * Buffer data address.
+ * @param length
+ * Packet length.
+ */
+static inline void
+mlx5_wqe_write_inline(struct txq *txq, volatile union mlx5_wqe *wqe,
+ uintptr_t addr, uint32_t length)
+{
+ uint32_t size;
+ uint16_t wqe_cnt = txq->wqe_n - 1;
+ uint16_t wqe_ci = txq->wqe_ci + 1;
+
+ /* Copy the first 16 bytes into inline header. */
+ rte_memcpy((void *)(uintptr_t)wqe->inl.eseg.inline_hdr_start,
+ (void *)(uintptr_t)addr,
+ MLX5_ETH_INLINE_HEADER_SIZE);
+ addr += MLX5_ETH_INLINE_HEADER_SIZE;
+ length -= MLX5_ETH_INLINE_HEADER_SIZE;
+ size = 3 + ((4 + length + 15) / 16);
+ wqe->inl.byte_cnt = htonl(length | MLX5_INLINE_SEG);
+ rte_memcpy((void *)(uintptr_t)&wqe->inl.data[0],
+ (void *)addr, MLX5_WQE64_INL_DATA);
+ addr += MLX5_WQE64_INL_DATA;
+ length -= MLX5_WQE64_INL_DATA;
+ while (length) {
+ volatile union mlx5_wqe *wqe_next =
+ &(*txq->wqes)[wqe_ci & wqe_cnt];
+ uint32_t copy_bytes = (length > sizeof(*wqe)) ?
+ sizeof(*wqe) :
+ length;
+
+ rte_mov64((uint8_t *)(uintptr_t)&wqe_next->data[0],
+ (uint8_t *)addr);
+ addr += copy_bytes;
+ length -= copy_bytes;
+ ++wqe_ci;
+ }
+ assert(size < 64);
+ wqe->inl.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
+ wqe->inl.ctrl.data[1] = htonl(txq->qp_num_8s | size);
+ wqe->inl.ctrl.data[2] = 0;
+ wqe->inl.ctrl.data[3] = 0;
+ wqe->inl.eseg.rsvd0 = 0;
+ wqe->inl.eseg.rsvd1 = 0;
+ wqe->inl.eseg.mss = 0;
+ wqe->inl.eseg.rsvd2 = 0;
+ wqe->inl.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE);
+ /* Increment consumer index. */
+ txq->wqe_ci = wqe_ci;
+}
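
The size computed above is in 16-byte units: one for the control segment, two for the 32-byte Ethernet segment, plus the inline blob (4-byte byte count plus data) rounded up. A worked example, assuming 64-byte WQE ring slots as the rte_mov64() spill loop suggests:

    /* A 128-byte frame: 16 bytes feed the inline Ethernet header,
     * leaving length = 112 when size is computed:
     *     size = 3 + (4 + 112 + 15) / 16 = 3 + 8 = 11  (16-byte units)
     * i.e. 176 bytes, spilling past the first 64-byte slot into the
     * next two ring slots via the copy loop above. */
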
+
+/**
+ * Write an inline WQE with VLAN.
*
- * @return
- * 0 on success, errno value on failure.
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the WQE to fill.
+ * @param addr
+ * Buffer data address.
+ * @param length
+ * Packet length.
+ * @param vlan_tci
+ * VLAN field to insert in packet.
*/
-static inline int
-insert_vlan_sw(struct rte_mbuf *buf)
+static inline void
+mlx5_wqe_write_inline_vlan(struct txq *txq, volatile union mlx5_wqe *wqe,
+ uintptr_t addr, uint32_t length, uint16_t vlan_tci)
{
- uintptr_t addr;
- uint32_t vlan;
- uint16_t head_room_len = rte_pktmbuf_headroom(buf);
+ uint32_t size;
+ uint32_t wqe_cnt = txq->wqe_n - 1;
+ uint16_t wqe_ci = txq->wqe_ci + 1;
+ uint32_t vlan = htonl(0x81000000 | vlan_tci);
- if (head_room_len < 4)
- return EINVAL;
+ /*
+ * Copy 12 bytes of source & destination MAC address.
+ * Copy 4 bytes of VLAN.
+ * Copy 2 bytes of Ether type.
+ */
+ rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start,
+ (uint8_t *)addr, 12);
+ rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start + 12,
+ &vlan, sizeof(vlan));
+ rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start + 16,
+ ((uint8_t *)addr + 12), 2);
+ addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
+ length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
+ size = (sizeof(wqe->inl.ctrl.ctrl) +
+ sizeof(wqe->inl.eseg) +
+ sizeof(wqe->inl.byte_cnt) +
+ length + 15) / 16;
+ wqe->inl.byte_cnt = htonl(length | MLX5_INLINE_SEG);
+ rte_memcpy((void *)(uintptr_t)&wqe->inl.data[0],
+ (void *)addr, MLX5_WQE64_INL_DATA);
+ addr += MLX5_WQE64_INL_DATA;
+ length -= MLX5_WQE64_INL_DATA;
+ while (length) {
+ volatile union mlx5_wqe *wqe_next =
+ &(*txq->wqes)[wqe_ci & wqe_cnt];
+ uint32_t copy_bytes = (length > sizeof(*wqe)) ?
+ sizeof(*wqe) :
+ length;
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- vlan = htonl(0x81000000 | buf->vlan_tci);
- memmove((void *)(addr - 4), (void *)addr, 12);
- memcpy((void *)(addr + 8), &vlan, sizeof(vlan));
+ rte_mov64((uint8_t *)(uintptr_t)&wqe_next->data[0],
+ (uint8_t *)addr);
+ addr += copy_bytes;
+ length -= copy_bytes;
+ ++wqe_ci;
+ }
+ assert(size < 64);
+ wqe->inl.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
+ wqe->inl.ctrl.data[1] = htonl(txq->qp_num_8s | size);
+ wqe->inl.ctrl.data[2] = 0;
+ wqe->inl.ctrl.data[3] = 0;
+ wqe->inl.eseg.rsvd0 = 0;
+ wqe->inl.eseg.rsvd1 = 0;
+ wqe->inl.eseg.mss = 0;
+ wqe->inl.eseg.rsvd2 = 0;
+ wqe->inl.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE);
+ /* Increment consumer index. */
+ txq->wqe_ci = wqe_ci;
+}
+
+/**
+ * Ring TX queue doorbell.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ */
+static inline void
+mlx5_tx_dbrec(struct txq *txq)
+{
+ uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset);
+ uint32_t data[4] = {
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+ htonl(txq->qp_num_8s),
+ 0,
+ 0,
+ };
+ rte_wmb();
+ *txq->qp_db = htonl(txq->wqe_ci);
+ /* Ensure ordering between DB record and BF copy. */
+ rte_wmb();
+ rte_mov16(dst, (uint8_t *)data);
+ txq->bf_offset ^= txq->bf_buf_size;
+}
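
Both barriers are deliberate: the first publishes the WQEs before the doorbell record is updated, the second orders the record update against the BlueFlame copy. The closing XOR alternates between the two halves of the BlueFlame buffer, a double-buffering idiom:

    /* With bf_buf_size = 256 (illustrative):
     *   call 1: 16-byte write at bf_reg + 0,   bf_offset becomes 256
     *   call 2: 16-byte write at bf_reg + 256, bf_offset becomes 0
     * so the NIC can fetch one copy while the CPU fills the other. */
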
- SET_DATA_OFF(buf, head_room_len - 4);
- DATA_LEN(buf) += 4;
+/**
+ * Prefetch a CQE.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param ci
+ * CQE consumer index.
+ */
+static inline void
+tx_prefetch_cqe(struct txq *txq, uint16_t ci)
+{
+ volatile struct mlx5_cqe64 *cqe;
- return 0;
+ cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64;
+ rte_prefetch0(cqe);
}
-#if MLX5_PMD_SGE_WR_N > 1
+/**
+ * Prefetch a WQE.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param ci
+ * WQE consumer index.
+ */
+static inline void
+tx_prefetch_wqe(struct txq *txq, uint16_t ci)
+{
+ volatile union mlx5_wqe *wqe;
+
+ wqe = &(*txq->wqes)[ci & (txq->wqe_n - 1)];
+ rte_prefetch0(wqe);
+}
/**
- * Copy scattered mbuf contents to a single linear buffer.
+ * DPDK callback for TX.
*
- * @param[out] linear
- * Linear output buffer.
- * @param[in] buf
- * Scattered input buffer.
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
*
* @return
- * Number of bytes copied to the output buffer or 0 if not large enough.
+ * Number of packets successfully transmitted (<= pkts_n).
*/
-static unsigned int
-linearize_mbuf(linear_t *linear, struct rte_mbuf *buf)
+uint16_t
+mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- unsigned int size = 0;
- unsigned int offset;
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t elts_head = txq->elts_head;
+ const unsigned int elts_n = txq->elts_n;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ unsigned int max;
+ unsigned int comp;
+ volatile union mlx5_wqe *wqe = NULL;
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Prefetch first packet cacheline. */
+ tx_prefetch_cqe(txq, txq->cq_ci);
+ tx_prefetch_cqe(txq, txq->cq_ci + 1);
+ rte_prefetch0(*pkts);
+ /* Start processing. */
+ txq_complete(txq);
+ max = (elts_n - (elts_head - txq->elts_tail));
+ if (max > elts_n)
+ max -= elts_n;
do {
- unsigned int len = DATA_LEN(buf);
+ struct rte_mbuf *buf = *(pkts++);
+ unsigned int elts_head_next;
+ uintptr_t addr;
+ uint32_t length;
+ uint32_t lkey;
+ unsigned int segs_n = buf->nb_segs;
+ volatile struct mlx5_wqe_data_seg *dseg;
+ unsigned int ds = sizeof(*wqe) / 16;
- offset = size;
- size += len;
- if (unlikely(size > sizeof(*linear)))
- return 0;
- memcpy(&(*linear)[offset],
- rte_pktmbuf_mtod(buf, uint8_t *),
- len);
- buf = NEXT(buf);
- } while (buf != NULL);
- return size;
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max < segs_n + 1)
+ break;
+ max -= segs_n;
+ --pkts_n;
+ elts_head_next = (elts_head + 1) & (elts_n - 1);
+ wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
+ dseg = &wqe->wqe.dseg;
+ rte_prefetch0(wqe);
+ if (pkts_n)
+ rte_prefetch0(*pkts);
+ /* Retrieve buffer information. */
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ length = DATA_LEN(buf);
+ /* Update element. */
+ (*txq->elts)[elts_head] = buf;
+ /* Prefetch next buffer data. */
+ if (pkts_n)
+ rte_prefetch0(rte_pktmbuf_mtod(*pkts,
+ volatile void *));
+ /* Retrieve Memory Region key for this memory pool. */
+ lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+ if (buf->ol_flags & PKT_TX_VLAN_PKT)
+ mlx5_wqe_write_vlan(txq, wqe, addr, length, lkey,
+ buf->vlan_tci);
+ else
+ mlx5_wqe_write(txq, wqe, addr, length, lkey);
+		/* Should we enable HW CKSUM offload? */
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ wqe->wqe.eseg.cs_flags =
+ MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ } else {
+ wqe->wqe.eseg.cs_flags = 0;
+ }
+ while (--segs_n) {
+ /*
+ * Spill on next WQE when the current one does not have
+			 * enough room left. Size of WQE must be a multiple
+ * of data segment size.
+ */
+ assert(!(sizeof(*wqe) % sizeof(*dseg)));
+ if (!(ds % (sizeof(*wqe) / 16)))
+ dseg = (volatile void *)
+ &(*txq->wqes)[txq->wqe_ci++ &
+ (txq->wqe_n - 1)];
+ else
+ ++dseg;
+ ++ds;
+ buf = buf->next;
+ assert(buf);
+ /* Store segment information. */
+ dseg->byte_count = htonl(DATA_LEN(buf));
+ dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+ dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+ (*txq->elts)[elts_head_next] = buf;
+ elts_head_next = (elts_head_next + 1) & (elts_n - 1);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ length += DATA_LEN(buf);
+#endif
+ ++j;
+ }
+ /* Update DS field in WQE. */
+ wqe->wqe.ctrl.data[1] &= htonl(0xffffffc0);
+ wqe->wqe.ctrl.data[1] |= htonl(ds & 0x3f);
+ elts_head = elts_head_next;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += length;
+#endif
+ ++i;
+ } while (pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Check whether completion threshold has been reached. */
+ comp = txq->elts_comp + i + j;
+ if (comp >= MLX5_TX_COMP_THRESH) {
+ /* Request completion on last WQE. */
+ wqe->wqe.ctrl.data[2] = htonl(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->wqe.ctrl.data[3] = elts_head;
+ txq->elts_comp = 0;
+ } else {
+ txq->elts_comp = comp;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec(txq);
+ txq->elts_head = elts_head;
+ return i;
}
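
Completion accounting charges both packets (i) and extra segments (j) against the threshold, so multi-segment traffic still frees ring entries often enough. An illustrative run, assuming MLX5_TX_COMP_THRESH is 32 (its exact value is defined in mlx5_defs.h and is an assumption here):

    /* Carry-over elts_comp = 15; burst sends 10 two-segment packets,
     * so i = 10 and j = 10:
     *     comp = 15 + 10 + 10 = 35 >= 32
     * The last WQE gets ctrl.data[2] = htonl(8) (completion requested)
     * and elts_comp resets to 0; below threshold it merely accumulates. */
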
/**
- * Handle scattered buffers for mlx5_tx_burst().
+ * DPDK callback for TX with inline support.
*
- * @param txq
- * TX queue structure.
- * @param segs
- * Number of segments in buf.
- * @param elt
- * TX queue element to fill.
- * @param[in] buf
- * Buffer to process.
- * @param elts_head
- * Index of the linear buffer to use if necessary (normally txq->elts_head).
- * @param[out] sges
- * Array filled with SGEs on success.
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
*
* @return
- * A structure containing the processed packet size in bytes and the
- * number of SGEs. Both fields are set to (unsigned int)-1 in case of
- * failure.
+ * Number of packets successfully transmitted (<= pkts_n).
*/
-static struct tx_burst_sg_ret {
- unsigned int length;
- unsigned int num;
-}
-tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
- struct rte_mbuf *buf, unsigned int elts_head,
- struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N])
+uint16_t
+mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- unsigned int sent_size = 0;
- unsigned int j;
- int linearize = 0;
-
- /* When there are too many segments, extra segments are
- * linearized in the last SGE. */
- if (unlikely(segs > RTE_DIM(*sges))) {
- segs = (RTE_DIM(*sges) - 1);
- linearize = 1;
- }
- /* Update element. */
- elt->buf = buf;
- /* Register segments as SGEs. */
- for (j = 0; (j != segs); ++j) {
- struct ibv_sge *sge = &(*sges)[j];
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t elts_head = txq->elts_head;
+ const unsigned int elts_n = txq->elts_n;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ unsigned int max;
+ unsigned int comp;
+ volatile union mlx5_wqe *wqe = NULL;
+ unsigned int max_inline = txq->max_inline;
+
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Prefetch first packet cacheline. */
+ tx_prefetch_cqe(txq, txq->cq_ci);
+ tx_prefetch_cqe(txq, txq->cq_ci + 1);
+ rte_prefetch0(*pkts);
+ /* Start processing. */
+ txq_complete(txq);
+ max = (elts_n - (elts_head - txq->elts_tail));
+ if (max > elts_n)
+ max -= elts_n;
+ do {
+ struct rte_mbuf *buf = *(pkts++);
+ unsigned int elts_head_next;
+ uintptr_t addr;
+ uint32_t length;
uint32_t lkey;
+ unsigned int segs_n = buf->nb_segs;
+ volatile struct mlx5_wqe_data_seg *dseg;
+ unsigned int ds = sizeof(*wqe) / 16;
- /* Retrieve Memory Region key for this memory pool. */
- lkey = txq_mp2mr(txq, txq_mb2mp(buf));
- if (unlikely(lkey == (uint32_t)-1)) {
- /* MR does not exist. */
- DEBUG("%p: unable to get MP <-> MR association",
- (void *)txq);
- /* Clean up TX element. */
- elt->buf = NULL;
- goto stop;
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max < segs_n + 1)
+ break;
+ max -= segs_n;
+ --pkts_n;
+ elts_head_next = (elts_head + 1) & (elts_n - 1);
+ wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
+ dseg = &wqe->wqe.dseg;
+ tx_prefetch_wqe(txq, txq->wqe_ci);
+ tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ if (pkts_n)
+ rte_prefetch0(*pkts);
+		/* Should we enable HW CKSUM offload? */
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ wqe->inl.eseg.cs_flags =
+ MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ } else {
+ wqe->inl.eseg.cs_flags = 0;
}
- /* Update SGE. */
- sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
- if (txq->priv->vf)
- rte_prefetch0((volatile void *)
- (uintptr_t)sge->addr);
- sge->length = DATA_LEN(buf);
- sge->lkey = lkey;
- sent_size += sge->length;
- buf = NEXT(buf);
- }
- /* If buf is not NULL here and is not going to be linearized,
- * nb_segs is not valid. */
- assert(j == segs);
- assert((buf == NULL) || (linearize));
- /* Linearize extra segments. */
- if (linearize) {
- struct ibv_sge *sge = &(*sges)[segs];
- linear_t *linear = &(*txq->elts_linear)[elts_head];
- unsigned int size = linearize_mbuf(linear, buf);
-
- assert(segs == (RTE_DIM(*sges) - 1));
- if (size == 0) {
- /* Invalid packet. */
- DEBUG("%p: packet too large to be linearized.",
- (void *)txq);
- /* Clean up TX element. */
- elt->buf = NULL;
- goto stop;
+ /* Retrieve buffer information. */
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ length = DATA_LEN(buf);
+ /* Update element. */
+ (*txq->elts)[elts_head] = buf;
+ /* Prefetch next buffer data. */
+ if (pkts_n)
+ rte_prefetch0(rte_pktmbuf_mtod(*pkts,
+ volatile void *));
+ if ((length <= max_inline) && (segs_n == 1)) {
+ if (buf->ol_flags & PKT_TX_VLAN_PKT)
+ mlx5_wqe_write_inline_vlan(txq, wqe,
+ addr, length,
+ buf->vlan_tci);
+ else
+ mlx5_wqe_write_inline(txq, wqe, addr, length);
+ goto skip_segs;
+ } else {
+ /* Retrieve Memory Region key for this memory pool. */
+ lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+ if (buf->ol_flags & PKT_TX_VLAN_PKT)
+ mlx5_wqe_write_vlan(txq, wqe, addr, length,
+ lkey, buf->vlan_tci);
+ else
+ mlx5_wqe_write(txq, wqe, addr, length, lkey);
}
- /* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */
- if (RTE_DIM(*sges) == 1) {
- do {
- struct rte_mbuf *next = NEXT(buf);
-
- rte_pktmbuf_free_seg(buf);
- buf = next;
- } while (buf != NULL);
- elt->buf = NULL;
+ while (--segs_n) {
+ /*
+			 * Spill over to the next WQE when the current one
+			 * does not have enough room left. The WQE size must
+			 * be a multiple of the data segment size.
+ */
+ assert(!(sizeof(*wqe) % sizeof(*dseg)));
+ if (!(ds % (sizeof(*wqe) / 16)))
+ dseg = (volatile void *)
+ &(*txq->wqes)[txq->wqe_ci++ &
+ (txq->wqe_n - 1)];
+ else
+ ++dseg;
+ ++ds;
+ buf = buf->next;
+ assert(buf);
+ /* Store segment information. */
+ dseg->byte_count = htonl(DATA_LEN(buf));
+ dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+ dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+ (*txq->elts)[elts_head_next] = buf;
+ elts_head_next = (elts_head_next + 1) & (elts_n - 1);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ length += DATA_LEN(buf);
+#endif
+ ++j;
}
- /* Update SGE. */
- sge->addr = (uintptr_t)&(*linear)[0];
- sge->length = size;
- sge->lkey = txq->mr_linear->lkey;
- sent_size += size;
- /* Include last segment. */
- segs++;
+ /* Update DS field in WQE. */
+ wqe->inl.ctrl.data[1] &= htonl(0xffffffc0);
+ wqe->inl.ctrl.data[1] |= htonl(ds & 0x3f);
+skip_segs:
+ elts_head = elts_head_next;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += length;
+#endif
+ ++i;
+ } while (pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Check whether completion threshold has been reached. */
+ comp = txq->elts_comp + i + j;
+ if (comp >= MLX5_TX_COMP_THRESH) {
+ /* Request completion on last WQE. */
+ wqe->inl.ctrl.data[2] = htonl(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->inl.ctrl.data[3] = elts_head;
+ txq->elts_comp = 0;
+ } else {
+ txq->elts_comp = comp;
}
- return (struct tx_burst_sg_ret){
- .length = sent_size,
- .num = segs,
- };
-stop:
- return (struct tx_burst_sg_ret){
- .length = -1,
- .num = -1,
- };
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec(txq);
+ txq->elts_head = elts_head;
+ return i;
}
-#endif /* MLX5_PMD_SGE_WR_N > 1 */
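
/*
 * A minimal standalone sketch (not part of this patch) of the free-slot
 * computation used by the TX burst functions in this file: the 16-bit
 * "elts_head - elts_tail" difference may go negative once the head wraps,
 * and the "max -= elts_n" fixup brings the result back into [0, elts_n].
 * All names below are illustrative.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int
txq_free_slots(uint16_t elts_head, uint16_t elts_tail, unsigned int elts_n)
{
	/* Both indices stay in [0, elts_n); their difference may wrap. */
	unsigned int max = elts_n - (elts_head - elts_tail);

	if (max > elts_n)
		max -= elts_n;
	return max;
}

int
main(void)
{
	assert(txq_free_slots(200, 10, 256) == 66);  /* head ahead of tail */
	assert(txq_free_slots(10, 200, 256) == 190); /* head wrapped around */
	return 0;
}
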
+/**
+ * Open an MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ * @param length
+ * Packet length.
+ */
+static inline void
+mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+{
+ uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1);
+ volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
+ (volatile struct mlx5_wqe_data_seg (*)[])
+ (uintptr_t)&(*txq->wqes)[(idx + 1) & (txq->wqe_n - 1)];
+
+ mpw->state = MLX5_MPW_STATE_OPENED;
+ mpw->pkts_n = 0;
+ mpw->len = length;
+ mpw->total_len = 0;
+ mpw->wqe = &(*txq->wqes)[idx];
+ mpw->wqe->mpw.eseg.mss = htons(length);
+ mpw->wqe->mpw.eseg.inline_hdr_sz = 0;
+ mpw->wqe->mpw.eseg.rsvd0 = 0;
+ mpw->wqe->mpw.eseg.rsvd1 = 0;
+ mpw->wqe->mpw.eseg.rsvd2 = 0;
+ mpw->wqe->mpw.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_LSO_MPW);
+ mpw->wqe->mpw.ctrl.data[2] = 0;
+ mpw->wqe->mpw.ctrl.data[3] = 0;
+ mpw->data.dseg[0] = &mpw->wqe->mpw.dseg[0];
+ mpw->data.dseg[1] = &mpw->wqe->mpw.dseg[1];
+ mpw->data.dseg[2] = &(*dseg)[0];
+ mpw->data.dseg[3] = &(*dseg)[1];
+ mpw->data.dseg[4] = &(*dseg)[2];
+}
/**
- * DPDK callback for TX.
+ * Close an MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ */
+static inline void
+mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
+{
+ unsigned int num = mpw->pkts_n;
+
+ /*
+	 * Store the size in multiples of 16 bytes. The control and Ethernet
+	 * segments count as 2.
+ */
+ mpw->wqe->mpw.ctrl.data[1] = htonl(txq->qp_num_8s | (2 + num));
+ mpw->state = MLX5_MPW_STATE_CLOSED;
+ if (num < 3)
+ ++txq->wqe_ci;
+ else
+ txq->wqe_ci += 2;
+ tx_prefetch_wqe(txq, txq->wqe_ci);
+ tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+}
+
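
/*
 * A standalone sketch (not part of this patch) of the size accounting in
 * mlx5_mpw_close() above: the DS field counts 16-byte units, the control
 * and Ethernet segments take 2 units, and each data segment one more.
 * Two data segments therefore fit the first 64-byte WQE and the other
 * three (MLX5_MPW_DSEG_MAX being 5) spill into the next one, which is
 * why wqe_ci advances by 1 or 2. The 64-byte constant is an assumption.
 */
#include <assert.h>

#define WQEBB_SIZE 64u /* WQE basic block: 4 x 16-byte units. */

static unsigned int
mpw_wqebb_count(unsigned int pkts_n)
{
	unsigned int units = 2 + pkts_n; /* ctrl + eseg + one dseg each */

	return (units * 16 + WQEBB_SIZE - 1) / WQEBB_SIZE;
}

int
main(void)
{
	assert(mpw_wqebb_count(2) == 1); /* matches "++txq->wqe_ci" */
	assert(mpw_wqebb_count(3) == 2); /* matches "txq->wqe_ci += 2" */
	assert(mpw_wqebb_count(5) == 2);
	return 0;
}
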
+/**
+ * DPDK callback for TX with MPW support.
*
* @param dpdk_txq
* Generic pointer to TX queue structure.
@@ -531,224 +959,399 @@ stop:
* Number of packets successfully transmitted (<= pkts_n).
*/
uint16_t
-mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
- unsigned int elts_head = txq->elts_head;
+ uint16_t elts_head = txq->elts_head;
const unsigned int elts_n = txq->elts_n;
- unsigned int elts_comp_cd = txq->elts_comp_cd;
- unsigned int elts_comp = 0;
- unsigned int i;
+ unsigned int i = 0;
+ unsigned int j = 0;
unsigned int max;
- int err;
- struct rte_mbuf *buf = pkts[0];
+ unsigned int comp;
+ struct mlx5_mpw mpw = {
+ .state = MLX5_MPW_STATE_CLOSED,
+ };
- assert(elts_comp_cd != 0);
+ if (unlikely(!pkts_n))
+ return 0;
/* Prefetch first packet cacheline. */
- rte_prefetch0(buf);
+ tx_prefetch_cqe(txq, txq->cq_ci);
+ tx_prefetch_wqe(txq, txq->wqe_ci);
+ tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ /* Start processing. */
txq_complete(txq);
max = (elts_n - (elts_head - txq->elts_tail));
if (max > elts_n)
max -= elts_n;
- assert(max >= 1);
- assert(max <= elts_n);
- /* Always leave one free entry in the ring. */
- --max;
- if (max == 0)
+ do {
+ struct rte_mbuf *buf = *(pkts++);
+ unsigned int elts_head_next;
+ uint32_t length;
+ unsigned int segs_n = buf->nb_segs;
+ uint32_t cs_flags = 0;
+
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max < segs_n + 1)
+ break;
+		/* Do not bother with large packets that MPW cannot handle. */
+ if (segs_n > MLX5_MPW_DSEG_MAX)
+ break;
+ max -= segs_n;
+ --pkts_n;
+		/* Should we enable HW CKSUM offload? */
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
+ cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ /* Retrieve packet information. */
+ length = PKT_LEN(buf);
+ assert(length);
+ /* Start new session if packet differs. */
+ if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
+ ((mpw.len != length) ||
+ (segs_n != 1) ||
+ (mpw.wqe->mpw.eseg.cs_flags != cs_flags)))
+ mlx5_mpw_close(txq, &mpw);
+ if (mpw.state == MLX5_MPW_STATE_CLOSED) {
+ mlx5_mpw_new(txq, &mpw, length);
+ mpw.wqe->mpw.eseg.cs_flags = cs_flags;
+ }
+ /* Multi-segment packets must be alone in their MPW. */
+ assert((segs_n == 1) || (mpw.pkts_n == 0));
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length = 0;
+#endif
+ do {
+ volatile struct mlx5_wqe_data_seg *dseg;
+ uintptr_t addr;
+
+ elts_head_next = (elts_head + 1) & (elts_n - 1);
+ assert(buf);
+ (*txq->elts)[elts_head] = buf;
+ dseg = mpw.data.dseg[mpw.pkts_n];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ *dseg = (struct mlx5_wqe_data_seg){
+ .byte_count = htonl(DATA_LEN(buf)),
+ .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .addr = htonll(addr),
+ };
+ elts_head = elts_head_next;
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length += DATA_LEN(buf);
+#endif
+ buf = buf->next;
+ ++mpw.pkts_n;
+ ++j;
+ } while (--segs_n);
+ assert(length == mpw.len);
+ if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
+ mlx5_mpw_close(txq, &mpw);
+ elts_head = elts_head_next;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += length;
+#endif
+ ++i;
+ } while (pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
return 0;
- if (max > pkts_n)
- max = pkts_n;
- for (i = 0; (i != max); ++i) {
- struct rte_mbuf *buf_next = pkts[i + 1];
- unsigned int elts_head_next =
- (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
- struct txq_elt *elt = &(*txq->elts)[elts_head];
- unsigned int segs = NB_SEGS(buf);
+ /* Check whether completion threshold has been reached. */
+ /* "j" includes both packets and segments. */
+ comp = txq->elts_comp + j;
+ if (comp >= MLX5_TX_COMP_THRESH) {
+ volatile union mlx5_wqe *wqe = mpw.wqe;
+
+ /* Request completion on last WQE. */
+ wqe->mpw.ctrl.data[2] = htonl(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->mpw.ctrl.data[3] = elts_head;
+ txq->elts_comp = 0;
+ } else {
+ txq->elts_comp = comp;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
- unsigned int sent_size = 0;
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
#endif
- uint32_t send_flags = 0;
-#ifdef HAVE_VERBS_VLAN_INSERTION
- int insert_vlan = 0;
-#endif /* HAVE_VERBS_VLAN_INSERTION */
-
- if (i + 1 < max)
- rte_prefetch0(buf_next);
- /* Request TX completion. */
- if (unlikely(--elts_comp_cd == 0)) {
- elts_comp_cd = txq->elts_comp_cd_init;
- ++elts_comp;
- send_flags |= IBV_EXP_QP_BURST_SIGNALED;
- }
+ /* Ring QP doorbell. */
+ if (mpw.state == MLX5_MPW_STATE_OPENED)
+ mlx5_mpw_close(txq, &mpw);
+ mlx5_tx_dbrec(txq);
+ txq->elts_head = elts_head;
+ return i;
+}
+
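
/*
 * A sketch (not part of this patch) of the "packet differs" test used by
 * mlx5_tx_burst_mpw() above: only single-segment packets of identical
 * length and checksum flags may share an open multi-packet session.
 * Parameter names mirror the fields compared above but are illustrative.
 */
#include <assert.h>
#include <stdint.h>

static int
mpw_can_append(uint32_t mpw_len, uint8_t mpw_cs_flags,
	       uint32_t length, unsigned int segs_n, uint8_t cs_flags)
{
	return (mpw_len == length) &&
	       (segs_n == 1) &&
	       (mpw_cs_flags == cs_flags);
}

int
main(void)
{
	assert(mpw_can_append(64, 0, 64, 1, 0));   /* same packet shape */
	assert(!mpw_can_append(64, 0, 128, 1, 0)); /* length differs */
	assert(!mpw_can_append(64, 0, 64, 2, 0));  /* multi-segment */
	return 0;
}
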
+/**
+ * Open an MPW inline session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ * @param length
+ * Packet length.
+ */
+static inline void
+mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
+{
+ uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1);
+
+ mpw->state = MLX5_MPW_INL_STATE_OPENED;
+ mpw->pkts_n = 0;
+ mpw->len = length;
+ mpw->total_len = 0;
+ mpw->wqe = &(*txq->wqes)[idx];
+ mpw->wqe->mpw_inl.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_LSO_MPW);
+ mpw->wqe->mpw_inl.ctrl.data[2] = 0;
+ mpw->wqe->mpw_inl.ctrl.data[3] = 0;
+ mpw->wqe->mpw_inl.eseg.mss = htons(length);
+ mpw->wqe->mpw_inl.eseg.inline_hdr_sz = 0;
+ mpw->wqe->mpw_inl.eseg.cs_flags = 0;
+ mpw->wqe->mpw_inl.eseg.rsvd0 = 0;
+ mpw->wqe->mpw_inl.eseg.rsvd1 = 0;
+ mpw->wqe->mpw_inl.eseg.rsvd2 = 0;
+ mpw->data.raw = &mpw->wqe->mpw_inl.data[0];
+}
+
+/**
+ * Close an MPW inline session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ */
+static inline void
+mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
+{
+ unsigned int size;
+
+ size = sizeof(*mpw->wqe) - MLX5_MWQE64_INL_DATA + mpw->total_len;
+ /*
+	 * Store the size in multiples of 16 bytes. The control and Ethernet
+	 * segments count as 2.
+ */
+ mpw->wqe->mpw_inl.ctrl.data[1] =
+ htonl(txq->qp_num_8s | ((size + 15) / 16));
+ mpw->state = MLX5_MPW_STATE_CLOSED;
+ mpw->wqe->mpw_inl.byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+ txq->wqe_ci += (size + (sizeof(*mpw->wqe) - 1)) / sizeof(*mpw->wqe);
+}
+
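
/*
 * A standalone sketch (not part of this patch) of the arithmetic in
 * mlx5_mpw_inline_close() above: the session size is the WQE header plus
 * the inlined bytes, the DS field stores it in 16-byte units rounded up,
 * and wqe_ci advances by the number of 64-byte WQEs consumed. Both
 * constants are assumptions matching a 64-byte union mlx5_wqe with
 * 28 bytes of inline room in the first WQE.
 */
#include <assert.h>

#define WQE_SIZE        64u
#define MWQE64_INL_DATA 28u

static void
mpw_inline_size(unsigned int total_len, unsigned int *ds, unsigned int *wqes)
{
	unsigned int size = WQE_SIZE - MWQE64_INL_DATA + total_len;

	*ds = (size + 15) / 16;
	*wqes = (size + WQE_SIZE - 1) / WQE_SIZE;
}

int
main(void)
{
	unsigned int ds;
	unsigned int wqes;

	mpw_inline_size(28, &ds, &wqes); /* exactly fills the first WQE */
	assert((ds == 4) && (wqes == 1));
	mpw_inline_size(100, &ds, &wqes); /* spills over two more WQEs */
	assert((ds == 9) && (wqes == 3));
	return 0;
}
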
+/**
+ * DPDK callback for TX with MPW inline support.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t elts_head = txq->elts_head;
+ const unsigned int elts_n = txq->elts_n;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ unsigned int max;
+ unsigned int comp;
+ unsigned int inline_room = txq->max_inline;
+ struct mlx5_mpw mpw = {
+ .state = MLX5_MPW_STATE_CLOSED,
+ };
+
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Prefetch first packet cacheline. */
+ tx_prefetch_cqe(txq, txq->cq_ci);
+ tx_prefetch_wqe(txq, txq->wqe_ci);
+ tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ /* Start processing. */
+ txq_complete(txq);
+ max = (elts_n - (elts_head - txq->elts_tail));
+ if (max > elts_n)
+ max -= elts_n;
+ do {
+ struct rte_mbuf *buf = *(pkts++);
+ unsigned int elts_head_next;
+ uintptr_t addr;
+ uint32_t length;
+ unsigned int segs_n = buf->nb_segs;
+ uint32_t cs_flags = 0;
+
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max < segs_n + 1)
+ break;
+		/* Do not bother with large packets that MPW cannot handle. */
+ if (segs_n > MLX5_MPW_DSEG_MAX)
+ break;
+ max -= segs_n;
+ --pkts_n;
		/* Should we enable HW CKSUM offload? */
if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
- /* HW does not support checksum offloads at arbitrary
- * offsets but automatically recognizes the packet
- * type. For inner L3/L4 checksums, only VXLAN (UDP)
- * tunnels are currently supported. */
- if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
- send_flags |= IBV_EXP_QP_BURST_TUNNEL;
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
+ cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ /* Retrieve packet information. */
+ length = PKT_LEN(buf);
+ /* Start new session if packet differs. */
+ if (mpw.state == MLX5_MPW_STATE_OPENED) {
+ if ((mpw.len != length) ||
+ (segs_n != 1) ||
+ (mpw.wqe->mpw.eseg.cs_flags != cs_flags))
+ mlx5_mpw_close(txq, &mpw);
+ } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
+ if ((mpw.len != length) ||
+ (segs_n != 1) ||
+ (length > inline_room) ||
+ (mpw.wqe->mpw_inl.eseg.cs_flags != cs_flags)) {
+ mlx5_mpw_inline_close(txq, &mpw);
+ inline_room = txq->max_inline;
+ }
}
- if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-#ifdef HAVE_VERBS_VLAN_INSERTION
- if (!txq->priv->mps)
- insert_vlan = 1;
- else
-#endif /* HAVE_VERBS_VLAN_INSERTION */
- {
- err = insert_vlan_sw(buf);
- if (unlikely(err))
- goto stop;
+ if (mpw.state == MLX5_MPW_STATE_CLOSED) {
+ if ((segs_n != 1) ||
+ (length > inline_room)) {
+ mlx5_mpw_new(txq, &mpw, length);
+ mpw.wqe->mpw.eseg.cs_flags = cs_flags;
+ } else {
+ mlx5_mpw_inline_new(txq, &mpw, length);
+ mpw.wqe->mpw_inl.eseg.cs_flags = cs_flags;
}
}
- if (likely(segs == 1)) {
- uintptr_t addr;
- uint32_t length;
- uint32_t lkey;
- uintptr_t buf_next_addr;
+ /* Multi-segment packets must be alone in their MPW. */
+ assert((segs_n == 1) || (mpw.pkts_n == 0));
+ if (mpw.state == MLX5_MPW_STATE_OPENED) {
+ assert(inline_room == txq->max_inline);
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length = 0;
+#endif
+ do {
+ volatile struct mlx5_wqe_data_seg *dseg;
+
+ elts_head_next =
+ (elts_head + 1) & (elts_n - 1);
+ assert(buf);
+ (*txq->elts)[elts_head] = buf;
+ dseg = mpw.data.dseg[mpw.pkts_n];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ *dseg = (struct mlx5_wqe_data_seg){
+ .byte_count = htonl(DATA_LEN(buf)),
+ .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .addr = htonll(addr),
+ };
+ elts_head = elts_head_next;
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length += DATA_LEN(buf);
+#endif
+ buf = buf->next;
+ ++mpw.pkts_n;
+ ++j;
+ } while (--segs_n);
+ assert(length == mpw.len);
+ if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
+ mlx5_mpw_close(txq, &mpw);
+ } else {
+ unsigned int max;
- /* Retrieve buffer information. */
+ assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
+ assert(length <= inline_room);
+ assert(length == DATA_LEN(buf));
+ elts_head_next = (elts_head + 1) & (elts_n - 1);
addr = rte_pktmbuf_mtod(buf, uintptr_t);
- length = DATA_LEN(buf);
- /* Update element. */
- elt->buf = buf;
- if (txq->priv->vf)
- rte_prefetch0((volatile void *)
- (uintptr_t)addr);
- /* Prefetch next buffer data. */
- if (i + 1 < max) {
- buf_next_addr =
- rte_pktmbuf_mtod(buf_next, uintptr_t);
- rte_prefetch0((volatile void *)
- (uintptr_t)buf_next_addr);
+ (*txq->elts)[elts_head] = buf;
+ /* Maximum number of bytes before wrapping. */
+ max = ((uintptr_t)&(*txq->wqes)[txq->wqe_n] -
+ (uintptr_t)mpw.data.raw);
+ if (length > max) {
+ rte_memcpy((void *)(uintptr_t)mpw.data.raw,
+ (void *)addr,
+ max);
+ mpw.data.raw =
+ (volatile void *)&(*txq->wqes)[0];
+ rte_memcpy((void *)(uintptr_t)mpw.data.raw,
+ (void *)(addr + max),
+ length - max);
+ mpw.data.raw += length - max;
+ } else {
+ rte_memcpy((void *)(uintptr_t)mpw.data.raw,
+ (void *)addr,
+ length);
+ mpw.data.raw += length;
}
- /* Put packet into send queue. */
-#if MLX5_PMD_MAX_INLINE > 0
- if (length <= txq->max_inline) {
-#ifdef HAVE_VERBS_VLAN_INSERTION
- if (insert_vlan)
- err = txq->send_pending_inline_vlan
- (txq->qp,
- (void *)addr,
- length,
- send_flags,
- &buf->vlan_tci);
- else
-#endif /* HAVE_VERBS_VLAN_INSERTION */
- err = txq->send_pending_inline
- (txq->qp,
- (void *)addr,
- length,
- send_flags);
- } else
-#endif
- {
- /* Retrieve Memory Region key for this
- * memory pool. */
- lkey = txq_mp2mr(txq, txq_mb2mp(buf));
- if (unlikely(lkey == (uint32_t)-1)) {
- /* MR does not exist. */
- DEBUG("%p: unable to get MP <-> MR"
- " association", (void *)txq);
- /* Clean up TX element. */
- elt->buf = NULL;
- goto stop;
- }
-#ifdef HAVE_VERBS_VLAN_INSERTION
- if (insert_vlan)
- err = txq->send_pending_vlan
- (txq->qp,
- addr,
- length,
- lkey,
- send_flags,
- &buf->vlan_tci);
- else
-#endif /* HAVE_VERBS_VLAN_INSERTION */
- err = txq->send_pending
- (txq->qp,
- addr,
- length,
- lkey,
- send_flags);
+ if ((uintptr_t)mpw.data.raw ==
+ (uintptr_t)&(*txq->wqes)[txq->wqe_n])
+ mpw.data.raw =
+ (volatile void *)&(*txq->wqes)[0];
+ ++mpw.pkts_n;
+ ++j;
+ if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
+ mlx5_mpw_inline_close(txq, &mpw);
+ inline_room = txq->max_inline;
+ } else {
+ inline_room -= length;
}
- if (unlikely(err))
- goto stop;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- sent_size += length;
-#endif
- } else {
-#if MLX5_PMD_SGE_WR_N > 1
- struct ibv_sge sges[MLX5_PMD_SGE_WR_N];
- struct tx_burst_sg_ret ret;
-
- ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
- &sges);
- if (ret.length == (unsigned int)-1)
- goto stop;
- /* Put SG list into send queue. */
-#ifdef HAVE_VERBS_VLAN_INSERTION
- if (insert_vlan)
- err = txq->send_pending_sg_list_vlan
- (txq->qp,
- sges,
- ret.num,
- send_flags,
- &buf->vlan_tci);
- else
-#endif /* HAVE_VERBS_VLAN_INSERTION */
- err = txq->send_pending_sg_list
- (txq->qp,
- sges,
- ret.num,
- send_flags);
- if (unlikely(err))
- goto stop;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- sent_size += ret.length;
-#endif
-#else /* MLX5_PMD_SGE_WR_N > 1 */
- DEBUG("%p: TX scattered buffers support not"
- " compiled in", (void *)txq);
- goto stop;
-#endif /* MLX5_PMD_SGE_WR_N > 1 */
}
+ mpw.total_len += length;
elts_head = elts_head_next;
- buf = buf_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
- txq->stats.obytes += sent_size;
+ txq->stats.obytes += length;
#endif
- }
-stop:
+ ++i;
+ } while (pkts_n);
/* Take a shortcut if nothing must be sent. */
if (unlikely(i == 0))
return 0;
+ /* Check whether completion threshold has been reached. */
+ /* "j" includes both packets and segments. */
+ comp = txq->elts_comp + j;
+ if (comp >= MLX5_TX_COMP_THRESH) {
+ volatile union mlx5_wqe *wqe = mpw.wqe;
+
+ /* Request completion on last WQE. */
+ wqe->mpw_inl.ctrl.data[2] = htonl(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->mpw_inl.ctrl.data[3] = elts_head;
+ txq->elts_comp = 0;
+ } else {
+ txq->elts_comp = comp;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent packets counter. */
txq->stats.opackets += i;
#endif
/* Ring QP doorbell. */
- err = txq->send_flush(txq->qp);
- if (unlikely(err)) {
- /* A nonzero value is not supposed to be returned.
- * Nothing can be done about it. */
- DEBUG("%p: send_flush() failed with error %d",
- (void *)txq, err);
- }
+ if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
+ mlx5_mpw_inline_close(txq, &mpw);
+ else if (mpw.state == MLX5_MPW_STATE_OPENED)
+ mlx5_mpw_close(txq, &mpw);
+ mlx5_tx_dbrec(txq);
txq->elts_head = elts_head;
- txq->elts_comp += elts_comp;
- txq->elts_comp_cd = elts_comp_cd;
return i;
}
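
/*
 * A standalone sketch (not part of this patch) of the wrap-around copy
 * performed above when inlined data reaches the end of the WQE ring:
 * write the bytes that fit before the end, then restart from slot 0.
 * "len" is assumed to be at most "ring_size", as the inline room checks
 * above guarantee.
 */
#include <stdint.h>
#include <string.h>

static void
ring_copy(uint8_t *ring, size_t ring_size, size_t offset,
	  const uint8_t *src, size_t len)
{
	size_t room = ring_size - offset; /* bytes left before wrapping */

	if (len > room) {
		memcpy(ring + offset, src, room);
		memcpy(ring, src + room, len - room);
	} else {
		memcpy(ring + offset, src, len);
	}
}

int
main(void)
{
	uint8_t ring[8] = { 0 };
	const uint8_t src[5] = { 1, 2, 3, 4, 5 };

	ring_copy(ring, sizeof(ring), 6, src, sizeof(src));
	/* Bytes 1..2 land at offsets 6..7, bytes 3..5 wrap to 0..2. */
	return ((ring[7] == 2) && (ring[0] == 3)) ? 0 : 1;
}
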
/**
* Translate RX completion flags to packet type.
*
- * @param flags
- * RX completion flags returned by poll_length_flags().
+ * @param[in] cqe
+ * Pointer to CQE.
*
* @note: fix mlx5_dev_supported_ptypes_get() if any change here.
*
@@ -756,11 +1359,13 @@ stop:
* Packet type for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_pkt_type(uint32_t flags)
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
{
uint32_t pkt_type;
+ uint8_t flags = cqe->l4_hdr_type_etc;
+ uint8_t info = cqe->rsvd0[0];
- if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+ if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET)
pkt_type =
TRANSPOSE(flags,
IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
@@ -777,66 +1382,157 @@ rxq_cq_to_pkt_type(uint32_t flags)
else
pkt_type =
TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV4_PACKET,
- RTE_PTYPE_L3_IPV4) |
+ MLX5_CQE_L3_HDR_TYPE_IPV6,
+ RTE_PTYPE_L3_IPV6) |
TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV6_PACKET,
- RTE_PTYPE_L3_IPV6);
+ MLX5_CQE_L3_HDR_TYPE_IPV4,
+ RTE_PTYPE_L3_IPV4);
return pkt_type;
}
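
/*
 * A standalone sketch (not part of this patch) of the bit transposition
 * that the TRANSPOSE() macro used by rxq_cq_to_pkt_type() performs: move
 * a flag from its position in the completion word to its position in
 * the mbuf word with a single multiplication or division instead of a
 * branch. Single-bit, power-of-two masks are assumed here.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t
transpose(uint32_t val, uint32_t from, uint32_t to)
{
	return (from >= to) ?
	       ((val & from) / (from / to)) :
	       ((val & from) * (to / from));
}

int
main(void)
{
	assert(transpose(0x10, 0x10, 0x01) == 0x01); /* move right */
	assert(transpose(0x01, 0x01, 0x80) == 0x80); /* move left */
	assert(transpose(0x00, 0x10, 0x01) == 0x00); /* flag not set */
	return 0;
}
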
/**
+ * Get the size of the next packet for a given CQE. For compressed CQEs,
+ * the consumer index is updated only once all packets of the current
+ * compressed CQE have been processed.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ * @param cqe
+ * CQE to process.
+ *
+ * @return
+ * Packet size in bytes (0 if there is none), -1 in case of completion
+ * with error.
+ */
+static inline int
+mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
+ uint16_t cqe_cnt)
+{
+ struct rxq_zip *zip = &rxq->zip;
+ uint16_t cqe_n = cqe_cnt + 1;
+ int len = 0;
+
+ /* Process compressed data in the CQE and mini arrays. */
+ if (zip->ai) {
+ volatile struct mlx5_mini_cqe8 (*mc)[8] =
+ (volatile struct mlx5_mini_cqe8 (*)[8])
+ (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].cqe64);
+
+ len = ntohl((*mc)[zip->ai & 7].byte_cnt);
+ if ((++zip->ai & 7) == 0) {
+ /*
+ * Increment consumer index to skip the number of
+ * CQEs consumed. Hardware leaves holes in the CQ
+ * ring for software use.
+ */
+ zip->ca = zip->na;
+ zip->na += 8;
+ }
+ if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
+ uint16_t idx = rxq->cq_ci;
+ uint16_t end = zip->cq_ci;
+
+ while (idx != end) {
+ (*rxq->cqes)[idx & cqe_cnt].cqe64.op_own =
+ MLX5_CQE_INVALIDATE;
+ ++idx;
+ }
+ rxq->cq_ci = zip->cq_ci;
+ zip->ai = 0;
+ }
+	/* No compressed data, get next CQE and check whether it is compressed. */
+ } else {
+ int ret;
+ int8_t op_own;
+
+ ret = check_cqe64(cqe, cqe_n, rxq->cq_ci);
+ if (unlikely(ret == 1))
+ return 0;
+ ++rxq->cq_ci;
+ op_own = cqe->op_own;
+ if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
+ volatile struct mlx5_mini_cqe8 (*mc)[8] =
+ (volatile struct mlx5_mini_cqe8 (*)[8])
+ (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
+ cqe_cnt].cqe64);
+
+ /* Fix endianness. */
+ zip->cqe_cnt = ntohl(cqe->byte_cnt);
+ /*
+ * Current mini array position is the one returned by
+ * check_cqe64().
+ *
+			 * If the completion comprises several mini arrays,
+			 * the second one is, as a special case, located 7
+			 * CQEs after the initial CQE instead of 8 as for
+			 * subsequent ones.
+ */
+ zip->ca = rxq->cq_ci & cqe_cnt;
+ zip->na = zip->ca + 7;
+			/* Compute the next non-compressed CQE. */
+ --rxq->cq_ci;
+ zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
+ /* Get packet size to return. */
+ len = ntohl((*mc)[0].byte_cnt);
+ zip->ai = 1;
+ } else {
+ len = ntohl(cqe->byte_cnt);
+ }
+ /* Error while receiving packet. */
+ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
+ return -1;
+ }
+ return len;
+}
+
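
/*
 * A standalone sketch (not part of this patch) of the mini-CQE array
 * placement that "zip->ca"/"zip->na" track above: the first array of 8
 * entries overlays the CQE right after the compressed one, the second
 * sits only 7 CQEs later, and every following array is 8 CQEs apart.
 */
#include <assert.h>

static unsigned int
mini_array_slot(unsigned int first, unsigned int k, unsigned int cqe_cnt)
{
	unsigned int slot = (k == 0) ? first : first + (8 * k) - 1;

	return slot & cqe_cnt; /* cqe_cnt == ring size - 1 */
}

int
main(void)
{
	assert(mini_array_slot(10, 0, 255) == 10);
	assert(mini_array_slot(10, 1, 255) == 17); /* 7 CQEs after */
	assert(mini_array_slot(10, 2, 255) == 25); /* then every 8 */
	return 0;
}
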
+/**
* Translate RX completion flags to offload flags.
*
* @param[in] rxq
* Pointer to RX queue structure.
- * @param flags
- * RX completion flags returned by poll_length_flags().
+ * @param[in] cqe
+ * Pointer to CQE.
*
* @return
* Offload flags (ol_flags) for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
{
uint32_t ol_flags = 0;
+ uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
+ uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
+ uint8_t info = cqe->rsvd0[0];
- if (rxq->csum) {
- /* Set IP checksum flag only for IPv4/IPv6 packets. */
- if (flags &
- (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET))
- ol_flags |=
- TRANSPOSE(~flags,
- IBV_EXP_CQ_RX_IP_CSUM_OK,
- PKT_RX_IP_CKSUM_BAD);
-#ifdef HAVE_EXP_CQ_RX_TCP_PACKET
- /* Set L4 checksum flag only for TCP/UDP packets. */
- if (flags &
- (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET))
-#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */
- ol_flags |=
- TRANSPOSE(~flags,
- IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
- PKT_RX_L4_CKSUM_BAD);
- }
+ if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
+ (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
+ ol_flags |=
+ (!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
+ PKT_RX_IP_CKSUM_BAD);
+ if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
+ (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
+ (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
+ (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
+ ol_flags |=
+ (!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
+ PKT_RX_L4_CKSUM_BAD);
/*
* PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
* of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
* (its value is 0).
*/
- if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+ if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
ol_flags |=
- TRANSPOSE(~flags,
+ TRANSPOSE(~cqe->l4_hdr_type_etc,
IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
PKT_RX_IP_CKSUM_BAD) |
- TRANSPOSE(~flags,
+ TRANSPOSE(~cqe->l4_hdr_type_etc,
IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
PKT_RX_L4_CKSUM_BAD);
return ol_flags;
}
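
/*
 * A sketch (not part of this patch) of the branchless mapping used by
 * rxq_cq_to_ol_flags() above: "!(x & OK)" evaluates to 0 or 1, so
 * multiplying by the error flag sets PKT_RX_*_CKSUM_BAD exactly when
 * the hardware did not validate the checksum. Mask values are made up.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t
csum_bad(uint8_t hds_ip_ext, uint8_t ok_mask, uint32_t bad_flag)
{
	return !(hds_ip_ext & ok_mask) * bad_flag;
}

int
main(void)
{
	assert(csum_bad(0x10, 0x10, 0x8) == 0);   /* checksum validated */
	assert(csum_bad(0x00, 0x10, 0x8) == 0x8); /* checksum bad */
	return 0;
}
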
/**
- * DPDK callback for RX with scattered packets support.
+ * DPDK callback for RX.
*
* @param dpdk_rxq
* Generic pointer to RX queue structure.
@@ -849,353 +1545,127 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
* Number of packets successfully received (<= pkts_n).
*/
uint16_t
-mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
- const unsigned int elts_n = rxq->elts_n;
- unsigned int elts_head = rxq->elts_head;
- unsigned int i;
- unsigned int pkts_ret = 0;
- int ret;
+ struct rxq *rxq = dpdk_rxq;
+ const unsigned int wqe_cnt = rxq->elts_n - 1;
+ const unsigned int cqe_cnt = rxq->cqe_n - 1;
+ const unsigned int sges_n = rxq->sges_n;
+ struct rte_mbuf *pkt = NULL;
+ struct rte_mbuf *seg = NULL;
+ volatile struct mlx5_cqe64 *cqe =
+ &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
+ unsigned int i = 0;
+ unsigned int rq_ci = rxq->rq_ci << sges_n;
+ int len;
- if (unlikely(!rxq->sp))
- return mlx5_rx_burst(dpdk_rxq, pkts, pkts_n);
- if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */
- return 0;
- for (i = 0; (i != pkts_n); ++i) {
- struct rxq_elt_sp *elt = &(*elts)[elts_head];
- unsigned int len;
- unsigned int pkt_buf_len;
- struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */
- struct rte_mbuf **pkt_buf_next = &pkt_buf;
- unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
- unsigned int j = 0;
- uint32_t flags;
- uint16_t vlan_tci;
-
- /* Sanity checks. */
- assert(elts_head < rxq->elts_n);
- assert(rxq->elts_head < rxq->elts_n);
- ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
- if (unlikely(ret < 0)) {
- struct ibv_wc wc;
- int wcs_n;
-
- DEBUG("rxq=%p, poll_length() failed (ret=%d)",
- (void *)rxq, ret);
- /* ibv_poll_cq() must be used in case of failure. */
- wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
- if (unlikely(wcs_n == 0))
- break;
- if (unlikely(wcs_n < 0)) {
- DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
- (void *)rxq, wcs_n);
+ while (pkts_n) {
+ unsigned int idx = rq_ci & wqe_cnt;
+ volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
+ struct rte_mbuf *rep = (*rxq->elts)[idx];
+
+ if (pkt)
+ NEXT(seg) = rep;
+ seg = rep;
+ rte_prefetch0(seg);
+ rte_prefetch0(cqe);
+ rte_prefetch0(wqe);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(rep == NULL)) {
+ while (pkt) {
+ seg = NEXT(pkt);
+ rte_mbuf_refcnt_set(pkt, 0);
+ __rte_mbuf_raw_free(pkt);
+ pkt = seg;
+ }
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ if (!pkt) {
+ cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
+ len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt);
+ if (len == 0) {
+ rte_mbuf_refcnt_set(rep, 0);
+ __rte_mbuf_raw_free(rep);
break;
}
- assert(wcs_n == 1);
- if (unlikely(wc.status != IBV_WC_SUCCESS)) {
- /* Whatever, just repost the offending WR. */
- DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
- " completion status (%d): %s",
- (void *)rxq, wc.wr_id, wc.status,
- ibv_wc_status_str(wc.status));
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment dropped packets counter. */
+ if (unlikely(len == -1)) {
+ /* RX error, packet is likely too large. */
+ rte_mbuf_refcnt_set(rep, 0);
+ __rte_mbuf_raw_free(rep);
++rxq->stats.idropped;
-#endif
- goto repost;
+ goto skip;
}
- ret = wc.byte_len;
- }
- if (ret == 0)
- break;
- assert(ret >= (rxq->crc_present << 2));
- len = ret - (rxq->crc_present << 2);
- pkt_buf_len = len;
- /*
- * Replace spent segments with new ones, concatenate and
- * return them as pkt_buf.
- */
- while (1) {
- struct ibv_sge *sge = &elt->sges[j];
- struct rte_mbuf *seg = elt->bufs[j];
- struct rte_mbuf *rep;
- unsigned int seg_tailroom;
-
- assert(seg != NULL);
- /*
- * Fetch initial bytes of packet descriptor into a
- * cacheline while allocating rep.
- */
- rte_prefetch0(seg);
- rep = __rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(rep == NULL)) {
- /*
- * Unable to allocate a replacement mbuf,
- * repost WR.
- */
- DEBUG("rxq=%p: can't allocate a new mbuf",
- (void *)rxq);
- if (pkt_buf != NULL) {
- *pkt_buf_next = NULL;
- rte_pktmbuf_free(pkt_buf);
+ pkt = seg;
+ assert(len >= (rxq->crc_present << 2));
+ /* Update packet information. */
+ if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
+ rxq->crc_present) {
+ if (rxq->csum) {
+ pkt->packet_type =
+ rxq_cq_to_pkt_type(cqe);
+ pkt->ol_flags =
+ rxq_cq_to_ol_flags(rxq, cqe);
}
- /* Increment out of memory counters. */
- ++rxq->stats.rx_nombuf;
- ++rxq->priv->dev->data->rx_mbuf_alloc_failed;
- goto repost;
- }
-#ifndef NDEBUG
- /* Poison user-modifiable fields in rep. */
- NEXT(rep) = (void *)((uintptr_t)-1);
- SET_DATA_OFF(rep, 0xdead);
- DATA_LEN(rep) = 0xd00d;
- PKT_LEN(rep) = 0xdeadd00d;
- NB_SEGS(rep) = 0x2a;
- PORT(rep) = 0x2a;
- rep->ol_flags = -1;
-#endif
- assert(rep->buf_len == seg->buf_len);
- assert(rep->buf_len == rxq->mb_len);
- /* Reconfigure sge to use rep instead of seg. */
- assert(sge->lkey == rxq->mr->lkey);
- sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
- elt->bufs[j] = rep;
- ++j;
- /* Update pkt_buf if it's the first segment, or link
- * seg to the previous one and update pkt_buf_next. */
- *pkt_buf_next = seg;
- pkt_buf_next = &NEXT(seg);
- /* Update seg information. */
- seg_tailroom = (seg->buf_len - seg_headroom);
- assert(sge->length == seg_tailroom);
- SET_DATA_OFF(seg, seg_headroom);
- if (likely(len <= seg_tailroom)) {
- /* Last segment. */
- DATA_LEN(seg) = len;
- PKT_LEN(seg) = len;
- /* Sanity check. */
- assert(rte_pktmbuf_headroom(seg) ==
- seg_headroom);
- assert(rte_pktmbuf_tailroom(seg) ==
- (seg_tailroom - len));
- break;
- }
- DATA_LEN(seg) = seg_tailroom;
- PKT_LEN(seg) = seg_tailroom;
- /* Sanity check. */
- assert(rte_pktmbuf_headroom(seg) == seg_headroom);
- assert(rte_pktmbuf_tailroom(seg) == 0);
- /* Fix len and clear headroom for next segments. */
- len -= seg_tailroom;
- seg_headroom = 0;
- }
- /* Update head and tail segments. */
- *pkt_buf_next = NULL;
- assert(pkt_buf != NULL);
- assert(j != 0);
- NB_SEGS(pkt_buf) = j;
- PORT(pkt_buf) = rxq->port_id;
- PKT_LEN(pkt_buf) = pkt_buf_len;
- if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
- pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
- pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
- if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
- pkt_buf->ol_flags |= PKT_RX_VLAN_PKT;
- pkt_buf->vlan_tci = vlan_tci;
+ if (cqe->l4_hdr_type_etc &
+ MLX5_CQE_VLAN_STRIPPED) {
+ pkt->ol_flags |= PKT_RX_VLAN_PKT |
+ PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = ntohs(cqe->vlan_info);
+ }
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
}
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ PKT_LEN(pkt) = len;
}
-
- /* Return packet. */
- *(pkts++) = pkt_buf;
- ++pkts_ret;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment bytes counter. */
- rxq->stats.ibytes += pkt_buf_len;
-#endif
-repost:
- ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges));
- if (unlikely(ret)) {
- /* Inability to repost WRs is fatal. */
- DEBUG("%p: recv_sg_list(): failed (ret=%d)",
- (void *)rxq->priv,
- ret);
- abort();
- }
- if (++elts_head >= elts_n)
- elts_head = 0;
- continue;
- }
- if (unlikely(i == 0))
- return 0;
- rxq->elts_head = elts_head;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment packets counter. */
- rxq->stats.ipackets += pkts_ret;
-#endif
- return pkts_ret;
-}
-
-/**
- * DPDK callback for RX.
- *
- * The following function is the same as mlx5_rx_burst_sp(), except it doesn't
- * manage scattered packets. Improves performance when MRU is lower than the
- * size of the first segment.
- *
- * @param dpdk_rxq
- * Generic pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets successfully received (<= pkts_n).
- */
-uint16_t
-mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
- const unsigned int elts_n = rxq->elts_n;
- unsigned int elts_head = rxq->elts_head;
- struct ibv_sge sges[pkts_n];
- unsigned int i;
- unsigned int pkts_ret = 0;
- int ret;
-
- if (unlikely(rxq->sp))
- return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n);
- for (i = 0; (i != pkts_n); ++i) {
- struct rxq_elt *elt = &(*elts)[elts_head];
- unsigned int len;
- struct rte_mbuf *seg = elt->buf;
- struct rte_mbuf *rep;
- uint32_t flags;
- uint16_t vlan_tci;
-
- /* Sanity checks. */
- assert(seg != NULL);
- assert(elts_head < rxq->elts_n);
- assert(rxq->elts_head < rxq->elts_n);
+ DATA_LEN(rep) = DATA_LEN(seg);
+ PKT_LEN(rep) = PKT_LEN(seg);
+ SET_DATA_OFF(rep, DATA_OFF(seg));
+ NB_SEGS(rep) = NB_SEGS(seg);
+ PORT(rep) = PORT(seg);
+ NEXT(rep) = NULL;
+ (*rxq->elts)[idx] = rep;
/*
- * Fetch initial bytes of packet descriptor into a
- * cacheline while allocating rep.
+ * Fill NIC descriptor with the new buffer. The lkey and size
+ * of the buffers are already known, only the buffer address
+ * changes.
*/
- rte_prefetch0(seg);
- rte_prefetch0(&seg->cacheline1);
- ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
- if (unlikely(ret < 0)) {
- struct ibv_wc wc;
- int wcs_n;
-
- DEBUG("rxq=%p, poll_length() failed (ret=%d)",
- (void *)rxq, ret);
- /* ibv_poll_cq() must be used in case of failure. */
- wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
- if (unlikely(wcs_n == 0))
- break;
- if (unlikely(wcs_n < 0)) {
- DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
- (void *)rxq, wcs_n);
- break;
- }
- assert(wcs_n == 1);
- if (unlikely(wc.status != IBV_WC_SUCCESS)) {
- /* Whatever, just repost the offending WR. */
- DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
- " completion status (%d): %s",
- (void *)rxq, wc.wr_id, wc.status,
- ibv_wc_status_str(wc.status));
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Increment dropped packets counter. */
- ++rxq->stats.idropped;
-#endif
- /* Add SGE to array for repost. */
- sges[i] = elt->sge;
- goto repost;
- }
- ret = wc.byte_len;
- }
- if (ret == 0)
- break;
- assert(ret >= (rxq->crc_present << 2));
- len = ret - (rxq->crc_present << 2);
- rep = __rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(rep == NULL)) {
- /*
- * Unable to allocate a replacement mbuf,
- * repost WR.
- */
- DEBUG("rxq=%p: can't allocate a new mbuf",
- (void *)rxq);
- /* Increment out of memory counters. */
- ++rxq->stats.rx_nombuf;
- ++rxq->priv->dev->data->rx_mbuf_alloc_failed;
- goto repost;
+ wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
+ if (len > DATA_LEN(seg)) {
+ len -= DATA_LEN(seg);
+ ++NB_SEGS(pkt);
+ ++rq_ci;
+ continue;
}
-
- /* Reconfigure sge to use rep instead of seg. */
- elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM;
- assert(elt->sge.lkey == rxq->mr->lkey);
- elt->buf = rep;
-
- /* Add SGE to array for repost. */
- sges[i] = elt->sge;
-
- /* Update seg information. */
- SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM);
- NB_SEGS(seg) = 1;
- PORT(seg) = rxq->port_id;
- NEXT(seg) = NULL;
- PKT_LEN(seg) = len;
DATA_LEN(seg) = len;
- if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
- seg->packet_type = rxq_cq_to_pkt_type(flags);
- seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
- if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
- seg->ol_flags |= PKT_RX_VLAN_PKT;
- seg->vlan_tci = vlan_tci;
- }
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
- }
- /* Return packet. */
- *(pkts++) = seg;
- ++pkts_ret;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment bytes counter. */
- rxq->stats.ibytes += len;
+ rxq->stats.ibytes += PKT_LEN(pkt);
#endif
-repost:
- if (++elts_head >= elts_n)
- elts_head = 0;
- continue;
+ /* Return packet. */
+ *(pkts++) = pkt;
+ pkt = NULL;
+ --pkts_n;
+ ++i;
+skip:
+ /* Align consumer index to the next stride. */
+ rq_ci >>= sges_n;
+ ++rq_ci;
+ rq_ci <<= sges_n;
}
- if (unlikely(i == 0))
+ if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
return 0;
- /* Repost WRs. */
-#ifdef DEBUG_RECV
- DEBUG("%p: reposting %u WRs", (void *)rxq, i);
-#endif
- ret = rxq->recv(rxq->wq, sges, i);
- if (unlikely(ret)) {
- /* Inability to repost WRs is fatal. */
- DEBUG("%p: recv_burst(): failed (ret=%d)",
- (void *)rxq->priv,
- ret);
- abort();
- }
- rxq->elts_head = elts_head;
+ /* Update the consumer index. */
+ rxq->rq_ci = rq_ci >> sges_n;
+ rte_wmb();
+ *rxq->cq_db = htonl(rxq->cq_ci);
+ rte_wmb();
+ *rxq->rq_db = htonl(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment packets counter. */
- rxq->stats.ipackets += pkts_ret;
+ rxq->stats.ipackets += i;
#endif
- return pkts_ret;
+ return i;
}
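
/*
 * A standalone sketch (not part of this patch) of the "skip:" index
 * fixup above: with 2^sges_n buffers per packet, the WQE consumer index
 * is rounded up to the first slot of the next stride after each
 * completed or dropped packet, never left in the middle of one.
 */
#include <assert.h>

static unsigned int
align_to_next_stride(unsigned int rq_ci, unsigned int sges_n)
{
	rq_ci >>= sges_n; /* stride number */
	++rq_ci;          /* next stride */
	return rq_ci << sges_n;
}

int
main(void)
{
	assert(align_to_next_stride(5, 2) == 8);  /* 4 SGEs per stride */
	assert(align_to_next_stride(8, 2) == 12);
	assert(align_to_next_stride(7, 0) == 8);  /* single SGE */
	return 0;
}
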
/**
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 0e2b607d..f6e2cbac 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -43,6 +43,7 @@
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
+#include <infiniband/mlx5_hw.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -61,6 +62,7 @@
#include "mlx5.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+#include "mlx5_prm.h"
struct mlx5_rxq_stats {
unsigned int idx; /**< Mapping index. */
@@ -81,18 +83,6 @@ struct mlx5_txq_stats {
uint64_t odropped; /**< Total of packets not sent when TX ring full. */
};
-/* RX element (scattered packets). */
-struct rxq_elt_sp {
- struct ibv_sge sges[MLX5_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
- struct rte_mbuf *bufs[MLX5_PMD_SGE_WR_N]; /* SGEs buffers. */
-};
-
-/* RX element. */
-struct rxq_elt {
- struct ibv_sge sge; /* Scatter/Gather Element. */
- struct rte_mbuf *buf; /* SGE buffer. */
-};
-
/* Flow director queue structure. */
struct fdir_queue {
struct ibv_qp *qp; /* Associated RX QP. */
@@ -101,38 +91,49 @@ struct fdir_queue {
struct priv;
+/* Compressed CQE context. */
+struct rxq_zip {
+ uint16_t ai; /* Array index. */
+ uint16_t ca; /* Current array index. */
+ uint16_t na; /* Next array index. */
+ uint16_t cq_ci; /* The next CQE. */
+ uint32_t cqe_cnt; /* Number of CQEs. */
+};
+
/* RX queue descriptor. */
struct rxq {
- struct priv *priv; /* Back pointer to private data. */
- struct rte_mempool *mp; /* Memory Pool for allocations. */
- struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_exp_wq *wq; /* Work Queue. */
- int32_t (*poll)(); /* Verbs poll function. */
- int32_t (*recv)(); /* Verbs receive function. */
- unsigned int port_id; /* Port ID for incoming packets. */
- unsigned int elts_n; /* (*elts)[] length. */
- unsigned int elts_head; /* Current index in (*elts)[]. */
- unsigned int sp:1; /* Use scattered RX elements. */
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
- union {
- struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
- struct rxq_elt (*no_sp)[]; /* RX elements. */
- } elts;
- uint32_t mb_len; /* Length of a mp-issued mbuf. */
- unsigned int socket; /* CPU socket ID for allocations. */
- struct mlx5_rxq_stats stats; /* RX queue counters. */
+ unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+ uint16_t rq_ci;
+ uint16_t cq_ci;
+ uint16_t elts_n;
+ uint16_t cqe_n; /* Number of CQ elements. */
+ uint16_t port_id;
+ volatile struct mlx5_wqe_data_seg(*wqes)[];
+ volatile struct mlx5_cqe(*cqes)[];
+ struct rxq_zip zip; /* Compressed context. */
+ volatile uint32_t *rq_db;
+ volatile uint32_t *cq_db;
+ struct rte_mbuf *(*elts)[];
+ struct rte_mempool *mp;
+ struct mlx5_rxq_stats stats;
+} __rte_cache_aligned;
+
+/* RX queue control descriptor. */
+struct rxq_ctrl {
+ struct priv *priv; /* Back pointer to private data. */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_exp_wq *wq; /* Work Queue. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
struct fdir_queue fdir_queue; /* Flow director queue. */
struct ibv_mr *mr; /* Memory Region (for mp). */
struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ struct rxq rxq; /* Data path structure. */
};
/* Hash RX queue types. */
@@ -140,11 +141,9 @@ enum hash_rxq_type {
HASH_RXQ_TCPV4,
HASH_RXQ_UDPV4,
HASH_RXQ_IPV4,
-#ifdef HAVE_FLOW_SPEC_IPV6
HASH_RXQ_TCPV6,
HASH_RXQ_UDPV6,
HASH_RXQ_IPV6,
-#endif /* HAVE_FLOW_SPEC_IPV6 */
HASH_RXQ_ETH,
};
@@ -175,9 +174,7 @@ struct hash_rxq_init {
} hdr;
struct ibv_exp_flow_spec_tcp_udp tcp_udp;
struct ibv_exp_flow_spec_ipv4 ipv4;
-#ifdef HAVE_FLOW_SPEC_IPV6
struct ibv_exp_flow_spec_ipv6 ipv6;
-#endif /* HAVE_FLOW_SPEC_IPV6 */
struct ibv_exp_flow_spec_eth eth;
} flow_spec; /* Flow specification template. */
const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
@@ -232,74 +229,50 @@ struct hash_rxq {
struct ibv_qp *qp; /* Hash RX QP. */
enum hash_rxq_type type; /* Hash RX queue type. */
/* MAC flow steering rules, one per VLAN ID. */
- struct ibv_exp_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
+ struct ibv_exp_flow *mac_flow
+ [MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
struct ibv_exp_flow *special_flow
[MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];
};
-/* TX element. */
-struct txq_elt {
- struct rte_mbuf *buf;
-};
-
-/* Linear buffer type. It is used when transmitting buffers with too many
- * segments that do not fit the hardware queue (see max_send_sge).
- * Extra segments are copied (linearized) in such buffers, replacing the
- * last SGE during TX.
- * The size is arbitrary but large enough to hold a jumbo frame with
- * 8 segments considering mbuf.buf_len is about 2048 bytes. */
-typedef uint8_t linear_t[16384];
-
/* TX queue descriptor. */
struct txq {
- struct priv *priv; /* Back pointer to private data. */
- int32_t (*poll_cnt)(struct ibv_cq *cq, uint32_t max);
- int (*send_pending)();
-#ifdef HAVE_VERBS_VLAN_INSERTION
- int (*send_pending_vlan)();
-#endif
-#if MLX5_PMD_MAX_INLINE > 0
- int (*send_pending_inline)();
-#ifdef HAVE_VERBS_VLAN_INSERTION
- int (*send_pending_inline_vlan)();
-#endif
-#endif
-#if MLX5_PMD_SGE_WR_N > 1
- int (*send_pending_sg_list)();
-#ifdef HAVE_VERBS_VLAN_INSERTION
- int (*send_pending_sg_list_vlan)();
-#endif
-#endif
- int (*send_flush)(struct ibv_qp *qp);
- struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_qp *qp; /* Queue Pair. */
- struct txq_elt (*elts)[]; /* TX elements. */
-#if MLX5_PMD_MAX_INLINE > 0
- uint32_t max_inline; /* Max inline send size <= MLX5_PMD_MAX_INLINE. */
-#endif
- unsigned int elts_n; /* (*elts)[] length. */
- unsigned int elts_head; /* Current index in (*elts)[]. */
- unsigned int elts_tail; /* First element awaiting completion. */
- unsigned int elts_comp; /* Number of completion requests. */
- unsigned int elts_comp_cd; /* Countdown for next completion request. */
- unsigned int elts_comp_cd_init; /* Initial value for countdown. */
+ uint16_t elts_head; /* Current index in (*elts)[]. */
+ uint16_t elts_tail; /* First element awaiting completion. */
+ uint16_t elts_comp; /* Counter since last completion request. */
+ uint16_t elts_n; /* (*elts)[] length. */
+ uint16_t cq_ci; /* Consumer index for completion queue. */
+ uint16_t cqe_n; /* Number of CQ elements. */
+ uint16_t wqe_ci; /* Consumer index for work queue. */
+ uint16_t wqe_n; /* Number of WQ elements. */
+ uint16_t bf_offset; /* Blueflame offset. */
+ uint16_t bf_buf_size; /* Blueflame size. */
+ uint16_t max_inline; /* Maximum size to inline in a WQE. */
+ uint32_t qp_num_8s; /* QP number shifted by 8. */
+ volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
+ volatile union mlx5_wqe (*wqes)[]; /* Work queue. */
+ volatile uint32_t *qp_db; /* Work queue doorbell. */
+ volatile uint32_t *cq_db; /* Completion queue doorbell. */
+ volatile void *bf_reg; /* Blueflame register. */
struct {
const struct rte_mempool *mp; /* Cached Memory Pool. */
struct ibv_mr *mr; /* Memory Region (for mp). */
- uint32_t lkey; /* mr->lkey */
+ uint32_t lkey; /* htonl(mr->lkey) */
} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
+ struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
- /* Elements used only for init part are here. */
- linear_t (*elts_linear)[]; /* Linearized buffers. */
- struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
-#ifdef HAVE_VERBS_VLAN_INSERTION
- struct ibv_exp_qp_burst_family_v1 *if_qp; /* QP burst interface. */
-#else
+} __rte_cache_aligned;
+
+/* TX queue control descriptor. */
+struct txq_ctrl {
+ struct priv *priv; /* Back pointer to private data. */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
-#endif
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
unsigned int socket; /* CPU socket ID for allocations. */
+ struct txq txq; /* Data path structure. */
};
/* mlx5_rxq.c */
@@ -316,37 +289,40 @@ int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
-void rxq_cleanup(struct rxq *);
-int rxq_rehash(struct rte_eth_dev *, struct rxq *);
-int rxq_setup(struct rte_eth_dev *, struct rxq *, uint16_t, unsigned int,
- const struct rte_eth_rxconf *, struct rte_mempool *);
+void rxq_cleanup(struct rxq_ctrl *);
+int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
+int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
+ unsigned int, const struct rte_eth_rxconf *,
+ struct rte_mempool *);
int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_rxconf *, struct rte_mempool *);
void mlx5_rx_queue_release(void *);
-uint16_t mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
- uint16_t pkts_n);
-
+uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
/* mlx5_txq.c */
-void txq_cleanup(struct txq *);
-int txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf);
-
+void txq_cleanup(struct txq_ctrl *);
+int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t,
+ unsigned int, const struct rte_eth_txconf *);
int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
const struct rte_eth_txconf *);
void mlx5_tx_queue_release(void *);
-uint16_t mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
- uint16_t pkts_n);
+uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
/* mlx5_rxtx.c */
-struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *);
-void txq_mp2mr_iter(const struct rte_mempool *, void *);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst_sp(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_tx_burst_inline(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
+/* mlx5_mr.c */
+
+struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *);
+void txq_mp2mr_iter(struct rte_mempool *, void *);
+uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int);
+
#endif /* RTE_PMD_MLX5_RXTX_H_ */
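
/*
 * A sketch (not part of this patch) of how a caller might pick among the
 * TX burst variants declared above; the driver performs an equivalent
 * selection at configuration time, and the exact policy shown here (MPW
 * when supported, inline when a size is configured) is an assumption.
 * This snippet relies on the declarations from this header.
 */
typedef uint16_t (*tx_burst_t)(void *, struct rte_mbuf **, uint16_t);

static tx_burst_t
select_tx_burst(int mps_supported, unsigned int max_inline)
{
	if (mps_supported)
		return max_inline ?
		       mlx5_tx_burst_mpw_inline : mlx5_tx_burst_mpw;
	return max_inline ? mlx5_tx_burst_inline : mlx5_tx_burst;
}
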
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 31ce53ad..6fe61c4a 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -60,6 +60,7 @@
#endif
#include "mlx5_utils.h"
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
@@ -68,118 +69,62 @@
/**
* Allocate TX queue elements.
*
- * @param txq
+ * @param txq_ctrl
* Pointer to TX queue structure.
* @param elts_n
* Number of elements to allocate.
- *
- * @return
- * 0 on success, errno value on failure.
*/
-static int
-txq_alloc_elts(struct txq *txq, unsigned int elts_n)
+static void
+txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
{
unsigned int i;
- struct txq_elt (*elts)[elts_n] =
- rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket);
- linear_t (*elts_linear)[elts_n] =
- rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0,
- txq->socket);
- struct ibv_mr *mr_linear = NULL;
- int ret = 0;
- if ((elts == NULL) || (elts_linear == NULL)) {
- ERROR("%p: can't allocate packets array", (void *)txq);
- ret = ENOMEM;
- goto error;
- }
- mr_linear =
- ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear),
- (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
- if (mr_linear == NULL) {
- ERROR("%p: unable to configure MR, ibv_reg_mr() failed",
- (void *)txq);
- ret = EINVAL;
- goto error;
- }
- for (i = 0; (i != elts_n); ++i) {
- struct txq_elt *elt = &(*elts)[i];
+ for (i = 0; (i != elts_n); ++i)
+ (*txq_ctrl->txq.elts)[i] = NULL;
+ for (i = 0; (i != txq_ctrl->txq.wqe_n); ++i) {
+ volatile union mlx5_wqe *wqe = &(*txq_ctrl->txq.wqes)[i];
- elt->buf = NULL;
+ memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
}
- DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
- txq->elts_n = elts_n;
- txq->elts = elts;
- txq->elts_head = 0;
- txq->elts_tail = 0;
- txq->elts_comp = 0;
- /* Request send completion every MLX5_PMD_TX_PER_COMP_REQ packets or
- * at least 4 times per ring. */
- txq->elts_comp_cd_init =
- ((MLX5_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ?
- MLX5_PMD_TX_PER_COMP_REQ : (elts_n / 4));
- txq->elts_comp_cd = txq->elts_comp_cd_init;
- txq->elts_linear = elts_linear;
- txq->mr_linear = mr_linear;
- assert(ret == 0);
- return 0;
-error:
- if (mr_linear != NULL)
- claim_zero(ibv_dereg_mr(mr_linear));
-
- rte_free(elts_linear);
- rte_free(elts);
-
- DEBUG("%p: failed, freed everything", (void *)txq);
- assert(ret > 0);
- return ret;
+ DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
+ txq_ctrl->txq.elts_head = 0;
+ txq_ctrl->txq.elts_tail = 0;
+ txq_ctrl->txq.elts_comp = 0;
}
/**
* Free TX queue elements.
*
- * @param txq
+ * @param txq_ctrl
* Pointer to TX queue structure.
*/
static void
-txq_free_elts(struct txq *txq)
+txq_free_elts(struct txq_ctrl *txq_ctrl)
{
- unsigned int elts_n = txq->elts_n;
- unsigned int elts_head = txq->elts_head;
- unsigned int elts_tail = txq->elts_tail;
- struct txq_elt (*elts)[elts_n] = txq->elts;
- linear_t (*elts_linear)[elts_n] = txq->elts_linear;
- struct ibv_mr *mr_linear = txq->mr_linear;
+ unsigned int elts_n = txq_ctrl->txq.elts_n;
+ unsigned int elts_head = txq_ctrl->txq.elts_head;
+ unsigned int elts_tail = txq_ctrl->txq.elts_tail;
+ struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
- DEBUG("%p: freeing WRs", (void *)txq);
- txq->elts_n = 0;
- txq->elts_head = 0;
- txq->elts_tail = 0;
- txq->elts_comp = 0;
- txq->elts_comp_cd = 0;
- txq->elts_comp_cd_init = 0;
- txq->elts = NULL;
- txq->elts_linear = NULL;
- txq->mr_linear = NULL;
- if (mr_linear != NULL)
- claim_zero(ibv_dereg_mr(mr_linear));
+ DEBUG("%p: freeing WRs", (void *)txq_ctrl);
+ txq_ctrl->txq.elts_head = 0;
+ txq_ctrl->txq.elts_tail = 0;
+ txq_ctrl->txq.elts_comp = 0;
- rte_free(elts_linear);
- if (elts == NULL)
- return;
while (elts_tail != elts_head) {
- struct txq_elt *elt = &(*elts)[elts_tail];
+ struct rte_mbuf *elt = (*elts)[elts_tail];
- assert(elt->buf != NULL);
- rte_pktmbuf_free(elt->buf);
+ assert(elt != NULL);
+ rte_pktmbuf_free(elt);
#ifndef NDEBUG
/* Poisoning. */
- memset(elt, 0x77, sizeof(*elt));
+ memset(&(*elts)[elts_tail],
+ 0x77,
+ sizeof((*elts)[elts_tail]));
#endif
if (++elts_tail == elts_n)
elts_tail = 0;
}
- rte_free(elts);
}
/**
@@ -187,66 +132,104 @@ txq_free_elts(struct txq *txq)
*
* Destroy objects, free allocated memory and reset the structure for reuse.
*
- * @param txq
+ * @param txq_ctrl
* Pointer to TX queue structure.
*/
void
-txq_cleanup(struct txq *txq)
+txq_cleanup(struct txq_ctrl *txq_ctrl)
{
struct ibv_exp_release_intf_params params;
size_t i;
- DEBUG("cleaning up %p", (void *)txq);
- txq_free_elts(txq);
- txq->poll_cnt = NULL;
-#if MLX5_PMD_MAX_INLINE > 0
- txq->send_pending_inline = NULL;
-#endif
- txq->send_flush = NULL;
- if (txq->if_qp != NULL) {
- assert(txq->priv != NULL);
- assert(txq->priv->ctx != NULL);
- assert(txq->qp != NULL);
+ DEBUG("cleaning up %p", (void *)txq_ctrl);
+ txq_free_elts(txq_ctrl);
+ if (txq_ctrl->if_qp != NULL) {
+ assert(txq_ctrl->priv != NULL);
+ assert(txq_ctrl->priv->ctx != NULL);
+ assert(txq_ctrl->qp != NULL);
params = (struct ibv_exp_release_intf_params){
.comp_mask = 0,
};
- claim_zero(ibv_exp_release_intf(txq->priv->ctx,
- txq->if_qp,
+ claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
+ txq_ctrl->if_qp,
&params));
}
- if (txq->if_cq != NULL) {
- assert(txq->priv != NULL);
- assert(txq->priv->ctx != NULL);
- assert(txq->cq != NULL);
+ if (txq_ctrl->if_cq != NULL) {
+ assert(txq_ctrl->priv != NULL);
+ assert(txq_ctrl->priv->ctx != NULL);
+ assert(txq_ctrl->cq != NULL);
params = (struct ibv_exp_release_intf_params){
.comp_mask = 0,
};
- claim_zero(ibv_exp_release_intf(txq->priv->ctx,
- txq->if_cq,
+ claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
+ txq_ctrl->if_cq,
&params));
}
- if (txq->qp != NULL)
- claim_zero(ibv_destroy_qp(txq->qp));
- if (txq->cq != NULL)
- claim_zero(ibv_destroy_cq(txq->cq));
- if (txq->rd != NULL) {
+ if (txq_ctrl->qp != NULL)
+ claim_zero(ibv_destroy_qp(txq_ctrl->qp));
+ if (txq_ctrl->cq != NULL)
+ claim_zero(ibv_destroy_cq(txq_ctrl->cq));
+ if (txq_ctrl->rd != NULL) {
struct ibv_exp_destroy_res_domain_attr attr = {
.comp_mask = 0,
};
- assert(txq->priv != NULL);
- assert(txq->priv->ctx != NULL);
- claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
- txq->rd,
+ assert(txq_ctrl->priv != NULL);
+ assert(txq_ctrl->priv->ctx != NULL);
+ claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx,
+ txq_ctrl->rd,
&attr));
}
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (txq->mp2mr[i].mp == NULL)
+ for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
+ if (txq_ctrl->txq.mp2mr[i].mp == NULL)
break;
- assert(txq->mp2mr[i].mr != NULL);
- claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+ assert(txq_ctrl->txq.mp2mr[i].mr != NULL);
+ claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
+ }
+ memset(txq_ctrl, 0, sizeof(*txq_ctrl));
+}
+
+/**
+ * Initialize TX queue.
+ *
+ * @param tmpl
+ * Pointer to TX queue control template.
+ * @param txq_ctrl
+ * Pointer to TX queue control.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static inline int
+txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
+{
+ struct mlx5_qp *qp = to_mqp(tmpl->qp);
+ struct ibv_cq *ibcq = tmpl->cq;
+ struct mlx5_cq *cq = to_mxxx(cq, cq);
+
+ if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+ ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
+ "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ return EINVAL;
}
- memset(txq, 0, sizeof(*txq));
+ tmpl->txq.cqe_n = ibcq->cqe + 1;
+ tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
+ tmpl->txq.wqes =
+ (volatile union mlx5_wqe (*)[])
+ (uintptr_t)qp->gen_data.sqstart;
+ tmpl->txq.wqe_n = qp->sq.wqe_cnt;
+ tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
+ tmpl->txq.bf_reg = qp->gen_data.bf->reg;
+ tmpl->txq.bf_offset = qp->gen_data.bf->offset;
+ tmpl->txq.bf_buf_size = qp->gen_data.bf->buf_size;
+ tmpl->txq.cq_db = cq->dbrec;
+ tmpl->txq.cqes =
+ (volatile struct mlx5_cqe (*)[])
+ (uintptr_t)cq->active_buf->buf;
+ tmpl->txq.elts =
+ (struct rte_mbuf *(*)[tmpl->txq.elts_n])
+ ((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
+ return 0;
}
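txq_setup() recovers the element array by stepping just past the control structure; this relies on both being carved from a single allocation, which mlx5_tx_queue_setup() below performs with one rte_calloc_socket() of sizeof(*txq_ctrl) + desc * sizeof(struct rte_mbuf *). A minimal sketch of that layout under illustrative names:

    #include <stdint.h>
    #include <stdlib.h>

    struct ctrl {
            unsigned int elts_n;
            /* ... other control fields ... */
    };

    /* One allocation holds the control block and, right behind it,
     * the element array. */
    static struct ctrl *
    ctrl_alloc(unsigned int n)
    {
            struct ctrl *c = calloc(1, sizeof(*c) + n * sizeof(void *));

            if (c != NULL)
                    c->elts_n = n;
            return c;
    }

    /* Mirrors the (uintptr_t)txq_ctrl + sizeof(*txq_ctrl) computation
     * above. */
    static void **
    ctrl_elts(struct ctrl *c)
    {
            return (void **)((uintptr_t)c + sizeof(*c));
    }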
/**
@@ -254,7 +237,7 @@ txq_cleanup(struct txq *txq)
*
* @param dev
* Pointer to Ethernet device structure.
- * @param txq
+ * @param txq_ctrl
* Pointer to TX queue structure.
* @param desc
* Number of descriptors to configure in queue.
@@ -267,13 +250,14 @@ txq_cleanup(struct txq *txq)
* 0 on success, errno value on failure.
*/
int
-txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf)
+txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf)
{
struct priv *priv = mlx5_get_priv(dev);
- struct txq tmpl = {
+ struct txq_ctrl tmpl = {
.priv = priv,
- .socket = socket
+ .socket = socket,
};
union {
struct ibv_exp_query_intf_params params;
@@ -281,17 +265,19 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
struct ibv_exp_res_domain_init_attr rd;
struct ibv_exp_cq_init_attr cq;
struct ibv_exp_qp_attr mod;
+ struct ibv_exp_cq_attr cq_attr;
} attr;
enum ibv_exp_query_intf_status status;
int ret = 0;
- (void)conf; /* Thresholds configuration (ignored). */
- if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) {
- ERROR("%p: invalid number of TX descriptors (must be a"
- " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N);
- return EINVAL;
+ if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
+ ret = ENOTSUP;
+ ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
+ goto error;
}
- desc /= MLX5_PMD_SGE_WR_N;
+ (void)conf; /* Thresholds configuration (ignored). */
+ assert(desc > MLX5_TX_COMP_THRESH);
+ tmpl.txq.elts_n = desc;
/* MRs will be registered in mp2mr[] later. */
attr.rd = (struct ibv_exp_res_domain_init_attr){
.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
@@ -310,7 +296,10 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
.res_domain = tmpl.rd,
};
- tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
+ tmpl.cq = ibv_exp_create_cq(priv->ctx,
+ (((desc / MLX5_TX_COMP_THRESH) - 1) ?
+ ((desc / MLX5_TX_COMP_THRESH) - 1) : 1),
+ NULL, NULL, 0, &attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
ERROR("%p: CQ creation failure: %s",
@@ -331,14 +320,14 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
priv->device_attr.max_qp_wr :
desc),
- /* Max number of scatter/gather elements in a WR. */
- .max_send_sge = ((priv->device_attr.max_sge <
- MLX5_PMD_SGE_WR_N) ?
- priv->device_attr.max_sge :
- MLX5_PMD_SGE_WR_N),
-#if MLX5_PMD_MAX_INLINE > 0
- .max_inline_data = MLX5_PMD_MAX_INLINE,
-#endif
+ /*
+ * Max number of scatter/gather elements in a WR,
+ * must be 1 to prevent libmlx5 from trying to allocate
+ * too much memory. TX gather is not impacted by the
+ * priv->device_attr.max_sge limit and will still work
+ * properly.
+ */
+ .max_send_sge = 1,
},
.qp_type = IBV_QPT_RAW_PACKET,
/* Do *NOT* enable this, completion events are managed per
@@ -349,6 +338,10 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
};
+ if (priv->txq_inline && priv->txqs_n >= priv->txqs_inline) {
+ tmpl.txq.max_inline = priv->txq_inline;
+ attr.init.cap.max_inline_data = tmpl.txq.max_inline;
+ }
tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ret = (errno ? errno : EINVAL);
@@ -356,10 +349,11 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
(void *)dev, strerror(ret));
goto error;
}
-#if MLX5_PMD_MAX_INLINE > 0
- /* ibv_create_qp() updates this value. */
- tmpl.max_inline = attr.init.cap.max_inline_data;
-#endif
+ DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u,"
+ " max_inline_data=%u",
+ attr.init.cap.max_send_wr,
+ attr.init.cap.max_send_sge,
+ attr.init.cap.max_inline_data);
attr.mod = (struct ibv_exp_qp_attr){
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
@@ -373,12 +367,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
(void *)dev, strerror(ret));
goto error;
}
- ret = txq_alloc_elts(&tmpl, desc);
+ ret = txq_setup(&tmpl, txq_ctrl);
if (ret) {
- ERROR("%p: TXQ allocation failed: %s",
+ ERROR("%p: cannot initialize TX queue structure: %s",
(void *)dev, strerror(ret));
goto error;
}
+ txq_alloc_elts(&tmpl, desc);
attr.mod = (struct ibv_exp_qp_attr){
.qp_state = IBV_QPS_RTR
};
@@ -410,17 +405,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
attr.params = (struct ibv_exp_query_intf_params){
.intf_scope = IBV_EXP_INTF_GLOBAL,
.intf = IBV_EXP_INTF_QP_BURST,
- .obj = tmpl.qp,
-#ifdef HAVE_VERBS_VLAN_INSERTION
.intf_version = 1,
-#endif
-#ifdef HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR
+ .obj = tmpl.qp,
/* Enable multi-packet send if supported. */
.family_flags =
- (priv->mps ?
+ ((priv->mps && !priv->sriov) ?
IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
0),
-#endif
};
tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
if (tmpl.if_qp == NULL) {
@@ -430,30 +421,12 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
goto error;
}
/* Clean up txq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
- txq_cleanup(txq);
- *txq = tmpl;
- txq->poll_cnt = txq->if_cq->poll_cnt;
-#if MLX5_PMD_MAX_INLINE > 0
- txq->send_pending_inline = txq->if_qp->send_pending_inline;
-#ifdef HAVE_VERBS_VLAN_INSERTION
- txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan;
-#endif
-#endif
-#if MLX5_PMD_SGE_WR_N > 1
- txq->send_pending_sg_list = txq->if_qp->send_pending_sg_list;
-#ifdef HAVE_VERBS_VLAN_INSERTION
- txq->send_pending_sg_list_vlan = txq->if_qp->send_pending_sg_list_vlan;
-#endif
-#endif
- txq->send_pending = txq->if_qp->send_pending;
-#ifdef HAVE_VERBS_VLAN_INSERTION
- txq->send_pending_vlan = txq->if_qp->send_pending_vlan;
-#endif
- txq->send_flush = txq->if_qp->send_flush;
- DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
+ DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
+ txq_cleanup(txq_ctrl);
+ *txq_ctrl = tmpl;
+ DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
/* Pre-register known mempools. */
- rte_mempool_walk(txq_mp2mr_iter, txq);
+ rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
assert(ret == 0);
return 0;
error:
@@ -485,12 +458,26 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
{
struct priv *priv = dev->data->dev_private;
struct txq *txq = (*priv->txqs)[idx];
+ struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
int ret;
if (mlx5_is_secondary())
return -E_RTE_SECONDARY;
priv_lock(priv);
+ if (desc <= MLX5_TX_COMP_THRESH) {
+ WARN("%p: number of descriptors requested for TX queue %u"
+ " must be higher than MLX5_TX_COMP_THRESH, using"
+ " %u instead of %u",
+ (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
+ desc = MLX5_TX_COMP_THRESH + 1;
+ }
+ if (!rte_is_power_of_2(desc)) {
+ desc = 1 << log2above(desc);
+ WARN("%p: increased number of descriptors in TX queue %u"
+ " to the next power of two (%d)",
+ (void *)dev, idx, desc);
+ }
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
if (idx >= priv->txqs_n) {
@@ -507,26 +494,30 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
return -EEXIST;
}
(*priv->txqs)[idx] = NULL;
- txq_cleanup(txq);
+ txq_cleanup(txq_ctrl);
} else {
- txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
- if (txq == NULL) {
+ txq_ctrl =
+ rte_calloc_socket("TXQ", 1,
+ sizeof(*txq_ctrl) +
+ desc * sizeof(struct rte_mbuf *),
+ 0, socket);
+ if (txq_ctrl == NULL) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
priv_unlock(priv);
return -ENOMEM;
}
}
- ret = txq_setup(dev, txq, desc, socket, conf);
+ ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
if (ret)
- rte_free(txq);
+ rte_free(txq_ctrl);
else {
- txq->stats.idx = idx;
+ txq_ctrl->txq.stats.idx = idx;
DEBUG("%p: adding TX queue %p to list",
- (void *)dev, (void *)txq);
- (*priv->txqs)[idx] = txq;
+ (void *)dev, (void *)txq_ctrl);
+ (*priv->txqs)[idx] = &txq_ctrl->txq;
/* Update send callback. */
- dev->tx_pkt_burst = mlx5_tx_burst;
+ priv_select_tx_function(priv);
}
priv_unlock(priv);
return -ret;
@@ -542,6 +533,7 @@ void
mlx5_tx_queue_release(void *dpdk_txq)
{
struct txq *txq = (struct txq *)dpdk_txq;
+ struct txq_ctrl *txq_ctrl;
struct priv *priv;
unsigned int i;
@@ -550,17 +542,18 @@ mlx5_tx_queue_release(void *dpdk_txq)
if (txq == NULL)
return;
- priv = txq->priv;
+ txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ priv = txq_ctrl->priv;
priv_lock(priv);
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
DEBUG("%p: removing TX queue %p from list",
- (void *)priv->dev, (void *)txq);
+ (void *)priv->dev, (void *)txq_ctrl);
(*priv->txqs)[i] = NULL;
break;
}
- txq_cleanup(txq);
- rte_free(txq);
+ txq_cleanup(txq_ctrl);
+ rte_free(txq_ctrl);
priv_unlock(priv);
}
@@ -585,7 +578,8 @@ mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t pkts_n)
{
struct txq *txq = dpdk_txq;
- struct priv *priv = mlx5_secondary_data_setup(txq->priv);
+ struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
+ struct priv *priv = mlx5_secondary_data_setup(txq_ctrl->priv);
struct priv *primary_priv;
unsigned int index;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index ea7af1e4..4719e697 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -144,7 +144,7 @@ static void
priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
{
struct rxq *rxq = (*priv->rxqs)[idx];
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
struct ibv_exp_wq_attr mod;
uint16_t vlan_offloads =
(on ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0) |
@@ -158,15 +158,13 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
.vlan_offloads = vlan_offloads,
};
- err = ibv_exp_modify_wq(rxq->wq, &mod);
+ err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
if (err) {
ERROR("%p: failed to modified stripping mode: %s",
(void *)priv, strerror(err));
return;
}
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
-
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
}
@@ -218,7 +216,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
unsigned int i;
if (mask & ETH_VLAN_STRIP_MASK) {
- int hw_vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip;
+ int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
if (!priv->hw_vlan_strip) {
ERROR("VLAN stripping is not supported");
diff --git a/drivers/net/mpipe/Makefile b/drivers/net/mpipe/Makefile
index 46f046d5..846e2e07 100644
--- a/drivers/net/mpipe/Makefile
+++ b/drivers/net/mpipe/Makefile
@@ -42,6 +42,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe_tilegx.c
DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_net lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_net
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mpipe/mpipe_tilegx.c b/drivers/net/mpipe/mpipe_tilegx.c
index adcbc19e..26e14248 100644
--- a/drivers/net/mpipe/mpipe_tilegx.c
+++ b/drivers/net/mpipe/mpipe_tilegx.c
@@ -516,7 +516,7 @@ mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
int i;
for (i = 0; i < count; i++) {
- mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
+ mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
if (!mbuf)
break;
mpipe_recv_push(priv, mbuf);
@@ -1452,7 +1452,7 @@ mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
MPIPE_BUF_DEBT_THRESHOLD)
mpipe_local.mbuf_push_debt[in_port]++;
else {
- mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
+ mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
if (unlikely(!mbuf)) {
nb_nomem++;
gxio_mpipe_iqueue_drop(iqueue, idesc);
diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile
index 1dddd1fd..4cadd131 100644
--- a/drivers/net/nfp/Makefile
+++ b/drivers/net/nfp/Makefile
@@ -53,6 +53,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_net lib/librte_malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_net
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index bcf5fa99..6afd49b1 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -54,6 +54,7 @@
#include <rte_version.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
+#include <rte_spinlock.h>
#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
@@ -322,7 +323,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
for (i = 0; i < txq->tx_count; i++) {
if (txq->txbufs[i].mbuf) {
- rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
+ rte_pktmbuf_free(txq->txbufs[i].mbuf);
txq->txbufs[i].mbuf = NULL;
}
}
@@ -407,6 +408,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
ctrl, update);
+ rte_spinlock_lock(&hw->reconfig_lock);
+
nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
@@ -414,6 +417,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
err = __nfp_net_reconfig(hw, update);
+ rte_spinlock_unlock(&hw->reconfig_lock);
+
if (!err)
return 0;
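The new lock makes the CTRL/UPDATE register writes and the polling inside __nfp_net_reconfig() one critical section, so concurrent reconfigurations cannot interleave their register writes. Condensed into a single function (same driver symbols, error handling abridged):

    static int
    reconfig_locked(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
    {
            int err;

            rte_spinlock_lock(&hw->reconfig_lock);
            /* The firmware must observe both registers as a pair. */
            nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
            nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
            err = __nfp_net_reconfig(hw, update);
            rte_spinlock_unlock(&hw->reconfig_lock);
            return err;
    }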
@@ -902,11 +907,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
- nfp_dev_stats.imcasts =
- nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
- nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
/* reading general device stats */
nfp_dev_stats.ierrors =
nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -918,12 +918,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
- /* Multicast frames received */
- nfp_dev_stats.imcasts =
- nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
- nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
/* RX ring mbuf allocation failures */
nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
@@ -985,9 +979,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
hw->eth_stats_base.obytes =
nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
- hw->eth_stats_base.imcasts =
- nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
/* reading general device stats */
hw->eth_stats_base.ierrors =
nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -995,10 +986,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
hw->eth_stats_base.oerrors =
nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
- /* Multicast frames received */
- hw->eth_stats_base.imcasts =
- nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
/* RX ring mbuf allocation failures */
dev->data->rx_mbuf_alloc_failed = 0;
@@ -1813,7 +1800,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
- mb->ol_flags |= PKT_RX_VLAN_PKT;
+ mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
}
/* Adding the mbuff to the mbuff array passed by the app */
@@ -1989,11 +1976,16 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
*/
pkt_size = pkt->pkt_len;
- while (pkt_size) {
- /* Releasing mbuf which was prefetched above */
- if (*lmbuf)
- rte_pktmbuf_free_seg(*lmbuf);
+ /* Releasing mbuf which was prefetched above */
+ if (*lmbuf)
+ rte_pktmbuf_free(*lmbuf);
+ /*
+ * Link the mbuf to the descriptor so it is freed the
+ * next time this descriptor is reused
+ */
+ *lmbuf = pkt;
+ while (pkt_size) {
dma_size = pkt->data_len;
dma_addr = rte_mbuf_data_dma_addr(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -2007,12 +1999,6 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
ASSERT(free_descs > 0);
free_descs--;
- /*
- * Linking mbuf with descriptor for being released
- * next time descriptor is used
- */
- *lmbuf = pkt;
-
txq->wr_p++;
txq->tail++;
if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
@@ -2417,6 +2403,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
hw->max_rx_queues, hw->max_tx_queues);
+ /* Initializing spinlock for reconfigs */
+ rte_spinlock_init(&hw->reconfig_lock);
+
/* Allocating memory for mac addr */
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
@@ -2457,16 +2446,12 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
static struct rte_pci_id pci_id_nfp_net_map[] = {
{
- .vendor_id = PCI_VENDOR_ID_NETRONOME,
- .device_id = PCI_DEVICE_ID_NFP6000_PF_NIC,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID,
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+ PCI_DEVICE_ID_NFP6000_PF_NIC)
},
{
- .vendor_id = PCI_VENDOR_ID_NETRONOME,
- .device_id = PCI_DEVICE_ID_NFP6000_VF_NIC,
- .subsystem_vendor_id = PCI_ANY_ID,
- .subsystem_device_id = PCI_ANY_ID,
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+ PCI_DEVICE_ID_NFP6000_VF_NIC)
},
{
.vendor_id = 0,
@@ -2477,7 +2462,8 @@ static struct eth_driver rte_nfp_net_pmd = {
{
.name = "rte_nfp_net_pmd",
.id_table = pci_id_nfp_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = nfp_net_init,
.dev_private_size = sizeof(struct nfp_net_adapter),
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index 232ce5ca..c1809720 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -406,6 +406,7 @@ struct nfp_net_hw {
int stride_tx;
uint8_t *qcp_cfg;
+ rte_spinlock_t reconfig_lock;
uint32_t max_tx_queues;
uint32_t max_rx_queues;
diff --git a/drivers/net/null/Makefile b/drivers/net/null/Makefile
index 22023891..0c909c6f 100644
--- a/drivers/net/null/Makefile
+++ b/drivers/net/null/Makefile
@@ -54,7 +54,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += rte_eth_null.c
SYMLINK-y-include += rte_eth_null.h
# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_kvargs
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 5e8e203c..ab440f3b 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -69,6 +69,7 @@ struct null_queue {
struct pmd_internals {
unsigned packet_size;
unsigned packet_copy;
+ uint8_t port_id;
struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
@@ -114,6 +115,7 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
bufs[i]->pkt_len = packet_size;
bufs[i]->nb_segs = 1;
bufs[i]->next = NULL;
+ bufs[i]->port = h->internals->port_id;
}
rte_atomic64_add(&(h->rx_pkts), i);
@@ -142,6 +144,7 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
bufs[i]->pkt_len = packet_size;
bufs[i]->nb_segs = 1;
bufs[i]->next = NULL;
+ bufs[i]->port = h->internals->port_id;
}
rte_atomic64_add(&(h->rx_pkts), i);
@@ -529,6 +532,7 @@ eth_dev_null_create(const char *name,
internals->packet_size = packet_size;
internals->packet_copy = packet_copy;
+ internals->port_id = eth_dev->data->port_id;
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
diff --git a/drivers/net/pcap/Makefile b/drivers/net/pcap/Makefile
index b41d8a27..89ac4024 100644
--- a/drivers/net/pcap/Makefile
+++ b/drivers/net/pcap/Makefile
@@ -56,7 +56,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c
SYMLINK-y-include +=
# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_kvargs
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index c98e2341..c86f17b6 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -978,8 +978,8 @@ rte_pmd_pcap_devinit(const char *name, const char *params)
unsigned numa_node, using_dumpers = 0;
int ret;
struct rte_kvargs *kvlist;
- struct rx_pcaps pcaps;
- struct tx_pcaps dumpers;
+ struct rx_pcaps pcaps = {0};
+ struct tx_pcaps dumpers = {0};
RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
diff --git a/drivers/net/qede/LICENSE.qede_pmd b/drivers/net/qede/LICENSE.qede_pmd
new file mode 100644
index 00000000..c7cbdccc
--- /dev/null
+++ b/drivers/net/qede/LICENSE.qede_pmd
@@ -0,0 +1,28 @@
+/*
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of QLogic Corporation nor the name of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written consent.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
new file mode 100644
index 00000000..fe449aa9
--- /dev/null
+++ b/drivers/net/qede/Makefile
@@ -0,0 +1,106 @@
+# Copyright (c) 2016 QLogic Corporation.
+# All rights reserved.
+# www.qlogic.com
+#
+# See LICENSE.qede_pmd for copyright and licensing details.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_qede.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lz
+
+EXPORT_MAP := rte_pmd_qede_version.map
+
+LIBABIVER := 1
+
+#
+# OS
+#
+OS_TYPE := $(shell uname -s)
+
+#
+# CFLAGS
+#
+CFLAGS_BASE_DRIVER = -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-missing-prototypes
+CFLAGS_BASE_DRIVER += -Wno-cast-qual
+CFLAGS_BASE_DRIVER += -Wno-unused-function
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+CFLAGS_BASE_DRIVER += -Wno-strict-aliasing
+
+ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER += -Wno-unused-value
+CFLAGS_BASE_DRIVER += -Wno-format-nonliteral
+ifeq ($(OS_TYPE),Linux)
+ifeq ($(shell clang -Wno-shift-negative-value -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
+CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
+endif
+endif
+endif
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+CFLAGS_BASE_DRIVER += -Wno-missing-declarations
+CFLAGS_BASE_DRIVER += -Wno-maybe-uninitialized
+CFLAGS_BASE_DRIVER += -Wno-strict-prototypes
+ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
+endif
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS_BASE_DRIVER += -Wno-format-extra-args
+CFLAGS_BASE_DRIVER += -Wno-visibility
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+CFLAGS_BASE_DRIVER += -Wno-invalid-source-encoding
+CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized
+ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
+CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
+endif
+else
+CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type
+endif
+
+#
+# Add extra flags for base ecore driver files
+# to disable warnings in them
+#
+#
+BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS+=$(CFLAGS_BASE_DRIVER)))
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_l2.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dcbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sriov.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
+
+# dependent libs:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_net
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
new file mode 100644
index 00000000..16029b58
--- /dev/null
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <zlib.h>
+
+#include <rte_memzone.h>
+#include <rte_errno.h>
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_iov_api.h"
+#include "ecore_mcp_api.h"
+#include "ecore_l2_api.h"
+
+
+unsigned long qede_log2_align(unsigned long n)
+{
+ unsigned long ret = n ? 1 : 0;
+ unsigned long _n = n >> 1;
+
+ while (_n) {
+ _n >>= 1;
+ ret <<= 1;
+ }
+
+ if (ret < n)
+ ret <<= 1;
+
+ return ret;
+}
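A few illustrative values for qede_log2_align(), which rounds its argument up to the next power of two and maps 0 to 0 (sketch assumes <assert.h> and linkage against this file):

    static void
    qede_log2_align_examples(void)
    {
            assert(qede_log2_align(0) == 0);
            assert(qede_log2_align(1) == 1);
            assert(qede_log2_align(5) == 8);
            assert(qede_log2_align(8) == 8);
    }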
+
+u32 qede_osal_log2(u32 val)
+{
+ u32 log = 0;
+
+ while (val >>= 1)
+ log++;
+
+ return log;
+}
+
+inline void qede_set_bit(u32 nr, unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+inline void qede_clr_bit(u32 nr, unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+inline bool qede_test_bit(u32 nr, unsigned long *addr)
+{
+ bool res;
+
+ rte_mb();
+ res = ((*addr) & (1UL << nr)) != 0;
+ rte_mb();
+ return res;
+}
+
+static inline u32 qede_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
+}
+
+inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
+{
+ u32 i;
+ u32 nwords = 0;
+ OSAL_BUILD_BUG_ON(!limit);
+ nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
+ for (i = 0; i < nwords; i++)
+ if (~addr[i] != 0)
+ break;
+ return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
+}
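Usage sketch for the two helpers above (assumes <assert.h>): with only the low three bits of a one-word bitmap set, the first zero bit is index 3, and a fully-set bitmap yields the limit:

    static void
    find_first_zero_bit_example(void)
    {
            unsigned long map[1] = { 0x7UL }; /* bits 0..2 set */

            assert(qede_find_first_zero_bit(map, OSAL_BITS_PER_UL) == 3);
            map[0] = ~0UL; /* no zero bit: the limit comes back */
            assert(qede_find_first_zero_bit(map, OSAL_BITS_PER_UL) ==
                   OSAL_BITS_PER_UL);
    }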
+
+void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
+ __rte_unused struct vf_pf_resc_request *resc_req,
+ struct ecore_vf_acquire_sw_info *vf_sw_info)
+{
+ vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
+ vf_sw_info->override_fw_version = 1;
+}
+
+void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
+ dma_addr_t *phys, size_t size)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t core_id = rte_lcore_id();
+ unsigned int socket_id;
+
+ OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ if (core_id == (unsigned int)LCORE_ID_ANY)
+ core_id = 0;
+ socket_id = rte_lcore_to_socket_id(core_id);
+ mz = rte_memzone_reserve_aligned(mz_name, size,
+ socket_id, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(p_dev, "Unable to allocate DMA memory "
+ "of size %zu bytes - %s\n",
+ size, rte_strerror(rte_errno));
+ *phys = 0;
+ return OSAL_NULL;
+ }
+ *phys = mz->phys_addr;
+ DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
+ "size=%zu phys=0x%" PRIx64 " virt=%p on socket=%u\n",
+ mz->len, mz->phys_addr, mz->addr, socket_id);
+ return mz->addr;
+}
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
+ dma_addr_t *phys, size_t size, int align)
+{
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t core_id = rte_lcore_id();
+ unsigned int socket_id;
+
+ OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ if (core_id == (unsigned int)LCORE_ID_ANY)
+ core_id = 0;
+ socket_id = rte_lcore_to_socket_id(core_id);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align);
+ if (!mz) {
+ DP_ERR(p_dev, "Unable to allocate DMA memory "
+ "of size %zu bytes - %s\n",
+ size, rte_strerror(rte_errno));
+ *phys = 0;
+ return OSAL_NULL;
+ }
+ *phys = mz->phys_addr;
+ DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
+ "aligned memory size=%zu phys=0x%" PRIx64 " virt=%p core=%d\n",
+ mz->len, mz->phys_addr, mz->addr, core_id);
+ return mz->addr;
+}
+
+u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+ int rc;
+
+ p_hwfn->stream->next_in = input_buf;
+ p_hwfn->stream->avail_in = input_len;
+ p_hwfn->stream->next_out = unzip_buf;
+ p_hwfn->stream->avail_out = max_size;
+
+ rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+ if (rc != Z_OK) {
+ DP_ERR(p_hwfn,
+ "zlib init failed, rc = %d\n", rc);
+ return 0;
+ }
+
+ rc = inflate(p_hwfn->stream, Z_FINISH);
+ inflateEnd(p_hwfn->stream);
+
+ if (rc != Z_OK && rc != Z_STREAM_END) {
+ DP_ERR(p_hwfn,
+ "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
+ rc);
+ return 0;
+ }
+
+ return p_hwfn->stream->total_out / 4;
+}
+
+void
+qede_get_mcp_proto_stats(struct ecore_dev *edev,
+ enum ecore_mcp_protocol_type type,
+ union ecore_mcp_protocol_stats *stats)
+{
+ struct ecore_eth_stats lan_stats;
+
+ if (type == ECORE_MCP_LAN_STATS) {
+ ecore_get_vport_stats(edev, &lan_stats);
+ stats->lan_stats.ucast_rx_pkts = lan_stats.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = lan_stats.tx_ucast_pkts;
+ stats->lan_stats.fcs_err = -1;
+ } else {
+ DP_INFO(edev, "Statistics request type %d not supported\n",
+ type);
+ }
+}
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
new file mode 100644
index 00000000..3e2aeb03
--- /dev/null
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -0,0 +1,403 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __BCM_OSAL_H
+#define __BCM_OSAL_H
+
+#include <rte_byteorder.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_memcpy.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+
+/* Forward declaration */
+struct ecore_dev;
+struct ecore_hwfn;
+struct ecore_vf_acquire_sw_info;
+struct vf_pf_resc_request;
+enum ecore_mcp_protocol_type;
+union ecore_mcp_protocol_stats;
+
+void qed_link_update(struct ecore_hwfn *hwfn);
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#undef __BIG_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN
+#endif
+#else
+#undef __LITTLE_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN
+#endif
+#endif
+
+/* Memory Types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int16_t s16;
+typedef int32_t s32;
+
+typedef u16 __le16;
+typedef u32 __le32;
+typedef u32 OSAL_BE32;
+
+#define osal_uintptr_t uintptr_t
+
+typedef phys_addr_t dma_addr_t;
+
+typedef rte_spinlock_t osal_spinlock_t;
+
+typedef void *osal_dpc_t;
+
+typedef size_t osal_size_t;
+
+typedef intptr_t osal_int_ptr_t;
+
+typedef int bool;
+#define true 1
+#define false 0
+
+#define nothing do {} while (0)
+
+/* Delays */
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000 * (x))
+#define OSAL_UDELAY(time) usec_delay(time)
+#define OSAL_MSLEEP(time) msec_delay(time)
+
+/* Memory allocations and deallocations */
+
+#define OSAL_NULL ((void *)0)
+#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
+#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
+#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
+#define OSAL_VALLOC(dev, size) rte_malloc("qede", size, 0)
+#define OSAL_FREE(dev, memory) rte_free((void *)memory)
+#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
+#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
+#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
+#define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size)
+#define OSAL_MEMSET(dst, val, length) \
+ memset(dst, val, length)
+
+void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
+ size_t, int);
+
+#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
+ osal_dma_alloc_coherent(dev, phys, size)
+
+#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
+ osal_dma_alloc_coherent_aligned(dev, phys, size, align)
+
+/* TODO: */
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
+
+/* HW reads/writes */
+
+#define DIRECT_REG_RD(_dev, _reg_addr) \
+ (*((volatile u32 *) (_reg_addr)))
+
+#define REG_RD(_p_hwfn, _reg_offset) \
+ DIRECT_REG_RD(_p_hwfn, \
+ ((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
+
+#define DIRECT_REG_WR16(_reg_addr, _val) \
+ (*((volatile u16 *)(_reg_addr)) = _val)
+
+#define DIRECT_REG_WR(_dev, _reg_addr, _val) \
+ (*((volatile u32 *)(_reg_addr)) = _val)
+
+#define REG_WR(_p_hwfn, _reg_offset, _val) \
+ DIRECT_REG_WR(NULL, \
+ ((u8 *)((uintptr_t)(_p_hwfn->regview)) + (_reg_offset)), (u32)_val)
+
+#define REG_WR16(_p_hwfn, _reg_offset, _val) \
+ DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \
+ (_reg_offset)), (u16)_val)
+
+#define DOORBELL(_p_hwfn, _db_addr, _val) \
+ DIRECT_REG_WR(_p_hwfn, \
+ ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + (_db_addr)), (u32)_val)
+
+/* Mutexes */
+
+typedef pthread_mutex_t osal_mutex_t;
+#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
+#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
+#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
+#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
+#define OSAL_MUTEX_DEALLOC(lock) nothing
+
+/* Spinlocks */
+
+#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock)
+#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock)
+#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
+#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
+#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing
+
+/* DPC */
+
+#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
+#define OSAL_DPC_INIT(dpc, hwfn) nothing
+#define OSAL_POLL_MODE_DPC(hwfn) nothing
+
+/* Lists */
+
+#define OSAL_LIST_SPLICE_INIT(new_list, list) nothing
+#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) nothing
+
+typedef struct _osal_list_entry_t {
+ struct _osal_list_entry_t *next, *prev;
+} osal_list_entry_t;
+
+typedef struct osal_list_t {
+ osal_list_entry_t *head, *tail;
+ unsigned long cnt;
+} osal_list_t;
+
+#define OSAL_LIST_INIT(list) \
+ do { \
+ (list)->head = NULL; \
+ (list)->tail = NULL; \
+ (list)->cnt = 0; \
+ } while (0)
+
+#define OSAL_LIST_PUSH_HEAD(entry, list) \
+ do { \
+ (entry)->prev = (osal_list_entry_t *)0; \
+ (entry)->next = (list)->head; \
+ if ((list)->tail == (osal_list_entry_t *)0) { \
+ (list)->tail = (entry); \
+ } else { \
+ (list)->head->prev = (entry); \
+ } \
+ (list)->head = (entry); \
+ (list)->cnt++; \
+ } while (0)
+
+#define OSAL_LIST_PUSH_TAIL(entry, list) \
+ do { \
+ (entry)->next = (osal_list_entry_t *)0; \
+ (entry)->prev = (list)->tail; \
+ if ((list)->tail) { \
+ (list)->tail->next = (entry); \
+ } else { \
+ (list)->head = (entry); \
+ } \
+ (list)->tail = (entry); \
+ (list)->cnt++; \
+ } while (0)
+
+#define OSAL_LIST_FIRST_ENTRY(list, type, field) \
+ (type *)((list)->head)
+
+#define OSAL_LIST_REMOVE_ENTRY(entry, list) \
+ do { \
+ if ((list)->head == (entry)) { \
+ if ((list)->head) { \
+ (list)->head = (list)->head->next; \
+ if ((list)->head) { \
+ (list)->head->prev = (osal_list_entry_t *)0;\
+ } else { \
+ (list)->tail = (osal_list_entry_t *)0; \
+ } \
+ (list)->cnt--; \
+ } \
+ } else if ((list)->tail == (entry)) { \
+ if ((list)->tail) { \
+ (list)->tail = (list)->tail->prev; \
+ if ((list)->tail) { \
+ (list)->tail->next = (osal_list_entry_t *)0;\
+ } else { \
+ (list)->head = (osal_list_entry_t *)0; \
+ } \
+ (list)->cnt--; \
+ } \
+ } else { \
+ (entry)->prev->next = (entry)->next; \
+ (entry)->next->prev = (entry)->prev; \
+ (list)->cnt--; \
+ } \
+ } while (0)
+
+#define OSAL_LIST_IS_EMPTY(list) \
+ ((list)->cnt == 0)
+
+#define OSAL_LIST_NEXT(entry, field, type) \
+ (type *)((&((entry)->field))->next)
+
+/* TODO: Check field, type order */
+
+#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \
+ for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \
+ entry; \
+ entry = OSAL_LIST_NEXT(entry, field, type))
+
+#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \
+ for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \
+ tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL; \
+ entry != NULL; \
+ entry = (type *)tmp_entry, \
+ tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL)
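A minimal usage sketch for these list macros. OSAL_LIST_FIRST_ENTRY() and OSAL_LIST_NEXT() simply cast the link pointer to the containing type, so the embedded osal_list_entry_t must be the first member:

    struct item {
            osal_list_entry_t link; /* must stay first, see above */
            int val;
    };

    static int
    sum_items(osal_list_t *list)
    {
            struct item *it;
            int sum = 0;

            OSAL_LIST_FOR_EACH_ENTRY(it, list, link, struct item)
                    sum += it->val;
            return sum;
    }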
+
+/* TODO: OSAL_LIST_INSERT_ENTRY_AFTER */
+#define OSAL_LIST_INSERT_ENTRY_AFTER(new_entry, entry, list) \
+ OSAL_LIST_PUSH_HEAD(new_entry, list)
+
+/* PCI config space */
+
+#define OSAL_PCI_READ_CONFIG_BYTE(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_WORD(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_DWORD(dev, address, dst) nothing
+#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_FIND_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_WRITE_CONFIG_WORD(dev, address, val) nothing
+#define OSAL_BAR_SIZE(dev, bar_id) 0
+
+/* Barriers */
+
+#define OSAL_MMIOWB(dev) rte_wmb()
+#define OSAL_BARRIER(dev) rte_compiler_barrier()
+#define OSAL_SMP_RMB(dev) rte_rmb()
+#define OSAL_SMP_WMB(dev) rte_wmb()
+#define OSAL_RMB(dev) rte_rmb()
+#define OSAL_WMB(dev) rte_wmb()
+#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
+
+#define OSAL_BITS_PER_BYTE (8)
+#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE)
+#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1)
+
+/* Bitops */
+void qede_set_bit(u32, unsigned long *);
+#define OSAL_SET_BIT(bit, bitmap) \
+ qede_set_bit(bit, bitmap)
+
+void qede_clr_bit(u32, unsigned long *);
+#define OSAL_CLEAR_BIT(bit, bitmap) \
+ qede_clr_bit(bit, bitmap)
+
+bool qede_test_bit(u32, unsigned long *);
+#define OSAL_TEST_BIT(bit, bitmap) \
+ qede_test_bit(bit, bitmap)
+
+u32 qede_find_first_zero_bit(unsigned long *, u32);
+#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \
+ qede_find_first_zero_bit(bitmap, length)
+
+#define OSAL_BUILD_BUG_ON(cond) nothing
+#define ETH_ALEN ETHER_ADDR_LEN
+
+#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
+#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
+
+/* SR-IOV channel */
+
+#define OSAL_VF_FLR_UPDATE(hwfn) nothing
+#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
+#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
+#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
+#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
+#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
+#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing
+#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
+#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
+#define OSAL_IOV_GET_OS_TYPE() 0
+
+u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf);
+void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
+ struct ecore_vf_acquire_sw_info *);
+#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \
+ qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info)
+
+#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \
+ qede_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf)
+
+/* TODO: */
+#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
+#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
+
+#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
+#define OSAL_NUM_ACTIVE_CPU() 0
+
+/* Utility functions */
+
+#define RTE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(size, to_what) RTE_DIV_ROUND_UP(size, to_what)
+#define RTE_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDUP(value, to_what) RTE_ROUNDUP((value), (to_what))
+
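Worked values for the rounding helpers above:

    /* DIV_ROUND_UP(10, 4) == (10 + 3) / 4       == 3  */
    /* ROUNDUP(10, 4)      == ((10 + 3) / 4) * 4 == 12 */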
+unsigned long qede_log2_align(unsigned long n);
+#define OSAL_ROUNDUP_POW_OF_TWO(val) \
+ qede_log2_align(val)
+
+u32 qede_osal_log2(u32);
+#define OSAL_LOG2(val) \
+ qede_osal_log2(val)
+
+#define PRINT(format, ...) printf(format, ##__VA_ARGS__)
+#define PRINT_ERR(format, ...) PRINT(format, ##__VA_ARGS__)
+
+#define OFFSETOF(str, field) __builtin_offsetof(str, field)
+#define OSAL_ASSERT(is_assert) assert(is_assert)
+#define OSAL_BEFORE_PF_START(file, engine) nothing
+#define OSAL_AFTER_PF_STOP(file, engine) nothing
+
+/* Endian macros */
+#define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val)
+#define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val)
+#define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val)
+#define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val)
+#define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val)
+#define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val)
+#define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val)
+
+#define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr)
+#define OSAL_SPRINTF(name, pattern, ...) \
+ sprintf(name, pattern, ##__VA_ARGS__)
+#define OSAL_STRLEN(string) strlen(string)
+#define OSAL_STRCPY(dst, string) strcpy(dst, string)
+#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
+#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
+
+#define OSAL_INLINE inline
+#define OSAL_REG_ADDR(_p_hwfn, _offset) \
+ (void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset))
+#define OSAL_PAGE_SIZE 4096
+#define OSAL_IOMEM volatile
+#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#define OSAL_MIN_T(type, __min1, __min2) \
+ ((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
+#define OSAL_MAX_T(type, __max1, __max2) \
+ ((type)(__max1) > (type)(__max2) ? (type)(__max1) : (type)(__max2))
+
+void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
+ union ecore_mcp_protocol_stats *);
+#define OSAL_GET_PROTOCOL_STATS(dev, type, stats) \
+ qede_get_mcp_proto_stats(dev, type, stats)
+
+#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
+
+#endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
new file mode 100644
index 00000000..295a41f9
--- /dev/null
+++ b/drivers/net/qede/base/common_hsi.h
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+
+#define CORE_SPQE_PAGE_SIZE_BYTES 4096
+
+#define FW_MAJOR_VERSION 8
+#define FW_MINOR_VERSION 7
+#define FW_REVISION_VERSION 7
+#define FW_ENGINEERING_VERSION 0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2 (4)
+#define MAX_NUM_PORTS_BB (2)
+#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
+
+#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2 (208)
+#define MAX_NUM_VPORTS_BB (160)
+#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2 (320)
+#define MAX_NUM_L2_QUEUES_BB (256)
+#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2 (4)
+#define NUM_OF_PHYS_TCS (8)
+
+#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC (NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO (8)
+
+#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
+
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+
+/*****************/
+/* DQ CONSTANTS */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY 0
+
+/* XCM agg val selection */
+#define DQ_XCM_AGG_VAL_SEL_WORD2 0
+#define DQ_XCM_AGG_VAL_SEL_WORD3 1
+#define DQ_XCM_AGG_VAL_SEL_WORD4 2
+#define DQ_XCM_AGG_VAL_SEL_WORD5 3
+#define DQ_XCM_AGG_VAL_SEL_REG3 4
+#define DQ_XCM_AGG_VAL_SEL_REG4 5
+#define DQ_XCM_AGG_VAL_SEL_REG5 6
+#define DQ_XCM_AGG_VAL_SEL_REG6 7
+
+/* XCM agg val selection */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+ DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13 3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18 4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19 5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
+ DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/*****************/
+/* QM CONSTANTS */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2 512
+#define MAX_QM_TX_QUEUES_BB 448
+#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE 0x200
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH 16
+#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH 24
+#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH 32
+#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH 32
+#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX 0
+#define CAU_FSM_ETH_TX 1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB 12
+
+#define CAU_HC_STOPPED_STATE 3
+#define CAU_HC_DISABLE_STATE 4
+#define CAU_HC_ENABLE_STATE 0
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2 (368)
+#define MAX_SB_PER_PATH_BB (288)
+#define MAX_TOT_SB_PER_PATH \
+ MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD 129
+#define MAX_SB_PER_PF_SIMD 64
+#define MAX_SB_PER_VF 64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE 0x0000
+
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE 0x0600
+#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* PTT and GTT */
+#define PXP_NUM_PF_WINDOWS 12
+#define PXP_PER_PF_ENTRY_SIZE 8
+#define PXP_NUM_GLOBAL_WINDOWS 243
+#define PXP_GLOBAL_ENTRY_SIZE 4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
+#define PXP_PF_WINDOW_ADMIN_START 0
+#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
+#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
+ PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
+ PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+ PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
+ PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+ (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+ PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4
+#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+ PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+ (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+ (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+/******************/
+/* PBF CONSTANTS */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+/* Async data KCQ CQE */
+struct async_data {
+ __le32 cid;
+ __le16 itid;
+ u8 error_code;
+ u8 fw_debug_param;
+};
+
+struct regpair {
+ __le32 lo /* low word for reg-pair */;
+ __le32 hi /* high word for reg-pair */;
+};
+
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr /* VF-PF message address */;
+};
+
+struct iscsi_eqe_data {
+ __le32 cid /* Context ID of the connection */;
+ __le16 conn_id
+ /* Task Id of the task (for error that happened on a a task) */;
+ u8 error_code;
+ u8 reserved0;
+};
+
+/*
+ * Event Ring malicious VF data
+ */
+struct malicious_vf_eqe_data {
+ u8 vf_id /* Malicious VF ID */; /* WARNING:CAMELCASE */
+ u8 err_id /* Malicious VF error */;
+ __le16 reserved[3];
+};
+
+/*
+ * Event Ring initial cleanup data
+ */
+struct initial_cleanup_eqe_data {
+ u8 vf_id /* VF ID */; /* WARNING:CAMELCASE */
+ u8 reserved[7];
+};
+
+
+union event_ring_data {
+ u8 bytes[8] /* Byte Array */;
+ struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
+ struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
+ struct regpair roce_handle /* WARNING:CAMELCASE */
+ /* Dedicated field for RoCE affiliated asynchronous error */;
+ struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
+ struct initial_cleanup_eqe_data vf_init_cleanup
+ /* VF Initial Cleanup data */;
+};
+/* Event Ring Entry */
+struct event_ring_entry {
+ u8 protocol_id;
+ u8 opcode;
+ __le16 reserved0;
+ __le16 echo;
+ u8 fw_return_code;
+ u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
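Every bit-field in these structures follows the same MASK/SHIFT convention: a field named X occupies (X_MASK << X_SHIFT) of its word and is read by shifting then masking. A generic accessor in that style (a sketch, not necessarily the exact helper the ecore headers define):

    #define GET_FIELD_SKETCH(value, name) \
            (((value) >> name##_SHIFT) & name##_MASK)

    /* e.g. the ASYNC flag of a struct event_ring_entry: */
    /* async = GET_FIELD_SKETCH(e->flags, EVENT_RING_ENTRY_ASYNC); */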
+
+/* Multi function mode */
+enum mf_mode {
+ SF,
+ MF_OVLAN,
+ MF_NPAR,
+ MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+ PROTOCOLID_ISCSI /* iSCSI */,
+ PROTOCOLID_FCOE /* FCoE */,
+ PROTOCOLID_ROCE /* RoCE */,
+ PROTOCOLID_CORE /* Core (light L2, slow path core) */,
+ PROTOCOLID_ETH /* Ethernet */,
+ PROTOCOLID_IWARP /* iWARP */,
+ PROTOCOLID_TOE /* TOE */,
+ PROTOCOLID_PREROCE /* Pre (tapeout) RoCE */,
+ PROTOCOLID_COMMON /* ProtocolCommon */,
+ PROTOCOLID_TCP /* TCP */,
+ MAX_PROTOCOL_TYPE
+};
+
+/* status block structure */
+struct cau_pi_entry {
+ u32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT 24
+};
+
+/* status block structure */
+struct cau_sb_entry {
+ u32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
+#define CAU_SB_ENTRY_STATE0_MASK 0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT 24
+#define CAU_SB_ENTRY_STATE1_MASK 0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT 28
+ u32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18
+#define CAU_SB_ENTRY_VF_VALID_MASK 0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+#define CAU_SB_ENTRY_TPH_MASK 0x1
+#define CAU_SB_ENTRY_TPH_SHIFT 31
+};
+
+/* core doorbell data */
+struct core_db_data {
+ u8 params;
+#define CORE_DB_DATA_DEST_MASK 0x3
+#define CORE_DB_DATA_DEST_SHIFT 0
+#define CORE_DB_DATA_AGG_CMD_MASK 0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT 2
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
+#define CORE_DB_DATA_RESERVED_MASK 0x1
+#define CORE_DB_DATA_RESERVED_SHIFT 5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+ DB_AGG_CMD_NOP,
+ DB_AGG_CMD_SET,
+ DB_AGG_CMD_ADD,
+ DB_AGG_CMD_MAX,
+ MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+ DB_DEST_XCM,
+ DB_DEST_UCM,
+ DB_DEST_TCM,
+ DB_NUM_DESTINATIONS,
+ MAX_DB_DEST
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+ __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK 0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT 2
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT 5
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+ IGU_INT_ENABLE = 0,
+ IGU_INT_DISABLE = 1,
+ IGU_INT_NOP = 2,
+ IGU_INT_NOP2 = 3,
+ MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+ u32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
+ u32 reserved1;
+};
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+ IGU_SEG_ACCESS_REG = 0,
+ IGU_SEG_ACCESS_ATTN = 1,
+ MAX_IGU_SEG_ACCESS
+};
+
+struct parsing_and_err_flags {
+ __le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
+};
+
+/* Concrete Function ID. */
+struct pxp_concrete_fid {
+ __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_CONCRETE_FID_PORT_MASK 0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT 4
+#define PXP_CONCRETE_FID_PATH_MASK 0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT 6
+#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT 8
+};
+
+struct pxp_pretend_concrete_fid {
+ __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
+};
+
+union pxp_pretend_fid {
+ struct pxp_pretend_concrete_fid concrete_fid;
+ __le16 opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+ union pxp_pretend_fid fid;
+ __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT 0
+#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1
+#define PXP_PRETEND_CMD_PORT_MASK 0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT 2
+#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4
+#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+ __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT 0
+#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+ struct pxp_pretend_cmd pretend;
+};
+
+/* RSS hash type */
+enum rss_hash_type {
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
+ MAX_RSS_HASH_TYPE
+};
+
+/* status block structure */
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
+ __le32 sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+ __le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+};
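
The prod_index word packs a 24-bit producer counter next to zero padding. A
small sketch of the extraction (host-endian only; a real driver would first
convert the little-endian word with the platform's le32-to-cpu helper):

#include <stdint.h>

typedef uint32_t u32;

#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_PROD_INDEX_SHIFT 0

/* Extract the 24-bit producer index from the raw status block word. */
static u32 sb_prod_index(u32 prod_word)
{
	return (prod_word >> STATUS_BLOCK_PROD_INDEX_SHIFT) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
}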
+
+/* @DPDK */
+#define X_FINAL_CLEANUP_AGG_INT 1
+#define SDM_COMP_TYPE_AGG_INT 2
+#define MAX_NUM_LL2_RX_QUEUES 32
+#define QM_PQ_ELEMENT_SIZE 4
+#define PXP_VF_BAR0_START_IGU 0
+#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
+
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 16
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 0
+#define YSTORM_QZONE_SIZE 8
+#define PSTORM_QZONE_SIZE 0
+
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC \
+(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU \
+(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
+(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ \
+(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B \
+(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B \
+(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B \
+(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B \
+(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B \
+(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B \
+(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
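
As a worked example of the END arithmetic above: the GRC region spans
[0x3E00, 0x3FFF] (start 0x3E00 plus length 0x200, minus 1), the IGU region
spans [0x0, 0x2FFF], and the DQ region spans [0x3000, 0x31FF].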
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+#endif /* __COMMON_HSI__ */
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
new file mode 100644
index 00000000..d682a785
--- /dev/null
+++ b/drivers/net/qede/base/ecore.h
@@ -0,0 +1,754 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_H
+#define __ECORE_H
+
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_tools.h"
+#include "ecore_proto_if.h"
+#include "mcp_public.h"
+
+#define MAX_HWFNS_PER_DEVICE (4)
+#define NAME_SIZE 64 /* @DPDK */
+#define VER_SIZE 16
+/* @DPDK ARRAY_DECL */
+#define ECORE_WFQ_UNIT 100
+#include "../qede_logs.h" /* @DPDK */
+
+/* Constants */
+#define ECORE_WID_SIZE (1024)
+
+/* Configurable */
+#define ECORE_PF_DEMS_SIZE (4)
+
+/* cau states */
+enum ecore_coalescing_mode {
+ ECORE_COAL_MODE_DISABLE,
+ ECORE_COAL_MODE_ENABLE
+};
+
+enum ecore_nvm_cmd {
+ ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
+ ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
+ ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
+ ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
+ ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
+ ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
+ ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
+ ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
+ ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
+ ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
+ ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
+};
+
+#ifndef LINUX_REMOVE
+#if !defined(CONFIG_ECORE_L2)
+#define CONFIG_ECORE_L2
+#define CONFIG_ECORE_SRIOV
+#endif
+#endif
+
+/* helpers */
+#ifndef __EXTRACT__LINUX__
+#define MASK_FIELD(_name, _value) \
+ ((_value) &= (_name##_MASK))
+
+#define FIELD_VALUE(_name, _value) \
+ ((_value & _name##_MASK) << _name##_SHIFT)
+
+#define SET_FIELD(value, name, flag) \
+do { \
+ (value) &= ~(name##_MASK << name##_SHIFT); \
+ (value) |= (((u64)flag) << (name##_SHIFT)); \
+} while (0)
+
+#define GET_FIELD(value, name) \
+ (((value) >> (name##_SHIFT)) & name##_MASK)
+#endif
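
These helpers pair with the _MASK/_SHIFT defines used throughout the HSI
headers. A self-contained sketch of a set-then-read round trip (the
CAU_SB_ENTRY_* values and the two macros are copied from the headers above):

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18

#define SET_FIELD(value, name, flag) \
do { \
	(value) &= ~(name##_MASK << name##_SHIFT); \
	(value) |= (((u64)flag) << (name##_SHIFT)); \
} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)

int main(void)
{
	u32 params = 0;

	/* Write VF number 5 into bits [25:18], then read it back. */
	SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, 5);
	assert(GET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER) == 5);
	return 0;
}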
+
+static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * ECORE_PF_DEMS_SIZE);
+
+ return db_addr;
+}
+
+/* @DPDK: This is a backport from latest ecore for TSS fix */
+static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+ return db_addr;
+}
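
Both helpers compose a doorbell BAR offset from the DB_LEGACY_ADDR fields
defined in common_hsi.h. A worked, standalone sketch of the VF flavor
(FIELD_VALUE and the field defines are copied from above; the example
numbers are purely illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define DB_LEGACY_ADDR_DEMS_MASK 0x7
#define DB_LEGACY_ADDR_DEMS_SHIFT 2
#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
#define DB_LEGACY_ADDR_ICID_SHIFT 5

#define FIELD_VALUE(_name, _value) \
	((_value & _name##_MASK) << _name##_SHIFT)

int main(void)
{
	u32 cid = 0x10, dems = 1;
	/* VF flavor: both DEMS and ICID are encoded as bit fields. */
	u32 vf_db = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, dems) |
		    FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	printf("VF doorbell offset: 0x%x\n", vf_db); /* prints 0x204 */
	return 0;
}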
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
+ ((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
+ ~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
+
+#ifndef U64_HI
+#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#endif
+
+#ifndef U64_LO
+#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+#endif
+
+#ifndef __EXTRACT__LINUX__
+enum DP_LEVEL {
+ ECORE_LEVEL_VERBOSE = 0x0,
+ ECORE_LEVEL_INFO = 0x1,
+ ECORE_LEVEL_NOTICE = 0x2,
+ ECORE_LEVEL_ERR = 0x3,
+};
+
+#define ECORE_LOG_LEVEL_SHIFT (30)
+#define ECORE_LOG_VERBOSE_MASK (0x3fffffff)
+#define ECORE_LOG_INFO_MASK (0x40000000)
+#define ECORE_LOG_NOTICE_MASK (0x80000000)
+
+enum DP_MODULE {
+#ifndef LINUX_REMOVE
+ ECORE_MSG_DRV = 0x0001,
+ ECORE_MSG_PROBE = 0x0002,
+ ECORE_MSG_LINK = 0x0004,
+ ECORE_MSG_TIMER = 0x0008,
+ ECORE_MSG_IFDOWN = 0x0010,
+ ECORE_MSG_IFUP = 0x0020,
+ ECORE_MSG_RX_ERR = 0x0040,
+ ECORE_MSG_TX_ERR = 0x0080,
+ ECORE_MSG_TX_QUEUED = 0x0100,
+ ECORE_MSG_INTR = 0x0200,
+ ECORE_MSG_TX_DONE = 0x0400,
+ ECORE_MSG_RX_STATUS = 0x0800,
+ ECORE_MSG_PKTDATA = 0x1000,
+ ECORE_MSG_HW = 0x2000,
+ ECORE_MSG_WOL = 0x4000,
+#endif
+ ECORE_MSG_SPQ = 0x10000,
+ ECORE_MSG_STATS = 0x20000,
+ ECORE_MSG_DCB = 0x40000,
+ ECORE_MSG_IOV = 0x80000,
+ ECORE_MSG_SP = 0x100000,
+ ECORE_MSG_STORAGE = 0x200000,
+ ECORE_MSG_CXT = 0x800000,
+ ECORE_MSG_ILT = 0x2000000,
+ ECORE_MSG_DEBUG = 0x8000000,
+ /* to be added...up to 0x8000000 */
+};
+#endif
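
dp_module is a bitmap selecting which subsystems may print, while dp_level
is a minimum severity; the two gates are independent. A hedged sketch of the
kind of check a DP_VERBOSE-style macro applies (an illustration, not the
actual qede_logs.h implementation):

#include <stdint.h>

typedef uint32_t u32;
typedef uint8_t u8;

enum { ECORE_LEVEL_VERBOSE = 0x0, ECORE_LEVEL_INFO = 0x1 };
enum { ECORE_MSG_IOV = 0x80000 };

/* Hypothetical gate: print only when the module bit is enabled and
 * the configured level is at or below the message level.
 */
static int dp_should_print(u32 dp_module, u8 dp_level,
			   u32 msg_module, u8 msg_level)
{
	return (dp_module & msg_module) && dp_level <= msg_level;
}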
+
+#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+ (val == (cond1) ? true1 : \
+ (val == (cond2) ? true2 : def))
+
+/* forward */
+struct ecore_ptt_pool;
+struct ecore_spq;
+struct ecore_sb_info;
+struct ecore_sb_attn_info;
+struct ecore_cxt_mngr;
+struct ecore_dma_mem;
+struct ecore_sb_sp_info;
+struct ecore_igu_info;
+struct ecore_mcp_info;
+struct ecore_dcbx_info;
+
+struct ecore_rt_data {
+ u32 *init_val;
+ bool *b_valid;
+};
+
+enum ecore_tunn_mode {
+ ECORE_MODE_L2GENEVE_TUNN,
+ ECORE_MODE_IPGENEVE_TUNN,
+ ECORE_MODE_L2GRE_TUNN,
+ ECORE_MODE_IPGRE_TUNN,
+ ECORE_MODE_VXLAN_TUNN,
+};
+
+enum ecore_tunn_clss {
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ MAX_ECORE_TUNN_CLSS,
+};
+
+struct ecore_tunn_start_params {
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+struct ecore_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+struct ecore_hw_sriov_info {
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u64 active_vfs[3]; /* bitfield of active vfs */
+#define ECORE_IS_VF_ACTIVE(_p_dev, _rel_vf_id) \
+ (!!(_p_dev->sriov_info.active_vfs[_rel_vf_id / 64] & \
+ (1ULL << (_rel_vf_id % 64))))
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ bool b_hw_channel; /* Whether PF uses the HW-channel */
+};
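
ECORE_IS_VF_ACTIVE indexes the three-word active_vfs bitmap by splitting the
relative VF id into a 64-bit word index and a bit position. The same test
restated as a standalone function (assuming the bitmap layout above):

#include <stdint.h>

typedef uint64_t u64;
typedef uint16_t u16;

/* Same test as ECORE_IS_VF_ACTIVE: word rel_vf_id / 64, bit rel_vf_id % 64. */
static int vf_is_active(const u64 active_vfs[3], u16 rel_vf_id)
{
	return !!(active_vfs[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}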
+
+/* The PCI personality is not quite synonymous with the protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may support also the RoCE protocol
+ */
+enum ecore_pci_personality {
+ ECORE_PCI_ETH,
+ ECORE_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct ecore_qm_iids {
+ u32 cids;
+ u32 vf_cids;
+ u32 tids;
+};
+
+#define MAX_PF_PER_PORT 8
+
+/*@@@TBD MK RESC: need to remove and use MCP interface instead */
+/* HW / FW resources, the output of the features supported below; most
+ * information is received from the MFW.
+ */
+enum ECORE_RESOURCES {
+ ECORE_SB,
+ ECORE_L2_QUEUE,
+ ECORE_VPORT,
+ ECORE_RSS_ENG,
+ ECORE_PQ,
+ ECORE_RL,
+ ECORE_MAC,
+ ECORE_VLAN,
+ ECORE_ILT,
+ ECORE_CMDQS_CQS,
+ ECORE_MAX_RESC,
+};
+
+/* Features that require resources, given as input to the resource
+ * management algorithm; the output is the resources above.
+ */
+enum ECORE_FEATURE {
+ ECORE_PF_L2_QUE,
+ ECORE_PF_TC,
+ ECORE_VF,
+ ECORE_EXTRA_VF_QUE,
+ ECORE_VMQ,
+ ECORE_MAX_FEATURES,
+};
+
+enum ECORE_PORT_MODE {
+ ECORE_PORT_MODE_DE_2X40G,
+ ECORE_PORT_MODE_DE_2X50G,
+ ECORE_PORT_MODE_DE_1X100G,
+ ECORE_PORT_MODE_DE_4X10G_F,
+ ECORE_PORT_MODE_DE_4X10G_E,
+ ECORE_PORT_MODE_DE_4X20G,
+ ECORE_PORT_MODE_DE_1X40G,
+ ECORE_PORT_MODE_DE_2X25G,
+ ECORE_PORT_MODE_DE_1X25G
+};
+
+enum ecore_dev_cap {
+ ECORE_DEV_CAP_ETH,
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_hw_err_type {
+ ECORE_HW_ERR_FAN_FAIL,
+ ECORE_HW_ERR_MFW_RESP_FAIL,
+ ECORE_HW_ERR_HW_ATTN,
+ ECORE_HW_ERR_DMAE_FAIL,
+ ECORE_HW_ERR_RAMROD_FAIL,
+ ECORE_HW_ERR_FW_ASSERT,
+};
+#endif
+
+struct ecore_hw_info {
+ /* PCI personality */
+ enum ecore_pci_personality personality;
+
+ /* Resource Allocation scheme results */
+ u32 resc_start[ECORE_MAX_RESC];
+ u32 resc_num[ECORE_MAX_RESC];
+ u32 feat_num[ECORE_MAX_FEATURES];
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+ RESC_NUM(_p_hwfn, resc))
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+ u8 num_tc;
+ u8 ooo_tc;
+ u8 offload_tc;
+ u8 non_offload_tc;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 ovlan;
+ u32 part_num[4];
+
+ unsigned char hw_mac_addr[ETH_ALEN];
+
+ struct ecore_igu_info *p_igu_info;
+ /* Sriov */
+ u32 first_vf_in_pf;
+ u8 max_chains_per_vf;
+
+ u32 port_mode;
+ u32 hw_mode;
+ unsigned long device_capabilities;
+};
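
resc_start/resc_num describe, per resource type, the contiguous slice of the
engine-wide pool owned by this PF, which is why RESC_END is simply start plus
num. A minimal bounds-check sketch built on that layout (the half-open range
check is an illustration, not a driver API):

#include <stdint.h>

typedef uint32_t u32;

/* True when an absolute resource index falls inside this PF's
 * allocation: [start, start + num).
 */
static int resc_in_range(u32 resc_start, u32 resc_num, u32 abs_idx)
{
	return abs_idx >= resc_start && abs_idx < resc_start + resc_num;
}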
+
+struct ecore_hw_cid_data {
+ u32 cid;
+ bool b_cid_allocated;
+ u8 vfid; /* 1-based; 0 signals this is for a PF */
+
+ /* Additional identifiers */
+ u16 opaque_fid;
+ u8 vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE 0x2000
+
+struct ecore_dmae_info {
+ /* Mutex for synchronizing access to functions */
+ osal_mutex_t mutex;
+
+ u8 channel;
+
+ dma_addr_t completion_word_phys_addr;
+
+ /* The memory location where the DMAE writes the completion
+ * value when an operation is finished on this context.
+ */
+ u32 *p_completion_word;
+
+ dma_addr_t intermediate_buffer_phys_addr;
+
+ /* An intermediate buffer for DMAE operations that use virtual
+ * addresses - data is DMA'd to/from this buffer and then
+ * memcpy'd to/from the virtual address
+ */
+ u32 *p_intermediate_buffer;
+
+ dma_addr_t dmae_cmd_phys_addr;
+ struct dmae_cmd *p_dmae_cmd;
+};
+
+struct ecore_wfq_data {
+	u32 default_min_speed; /* When the WFQ feature is not configured */
+	u32 min_speed; /* When WFQ is configured for any one vport */
+ bool configured;
+};
+
+struct ecore_qm_info {
+ struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_vport_params *qm_vport_params;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u8 pure_lb_pq;
+ u8 offload_pq;
+ u8 pure_ack_pq;
+ u8 ooo_pq;
+ u8 vf_queues_offset;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct ecore_wfq_data *wfq_data;
+};
+
+struct storm_stats {
+ u32 address;
+ u32 len;
+};
+
+#define CONFIG_ECORE_BINARY_FW
+#define CONFIG_ECORE_ZIPPED_FW
+
+struct ecore_fw_data {
+#ifdef CONFIG_ECORE_BINARY_FW
+ struct fw_ver_info *fw_ver_info;
+#endif
+ const u8 *modes_tree_buf;
+ union init_op *init_ops;
+ const u32 *arr_data;
+ u32 init_ops_size;
+};
+
+struct ecore_hwfn {
+ struct ecore_dev *p_dev;
+ u8 my_id; /* ID inside the PF */
+#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+ u8 rel_pf_id; /* Relative to engine */
+ u8 abs_pf_id;
+#define ECORE_PATH_ID(_p_hwfn) \
+ (ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+ u8 port_id;
+ bool b_active;
+
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+ void *dp_ctx;
+
+ bool first_on_engine;
+ bool hw_init_done;
+
+ u8 num_funcs_on_engine;
+
+ /* BAR access */
+ void OSAL_IOMEM *regview;
+ void OSAL_IOMEM *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PTT pool */
+ struct ecore_ptt_pool *p_ptt_pool;
+
+ /* HW info */
+ struct ecore_hw_info hw_info;
+
+ /* rt_array (for init-tool) */
+ struct ecore_rt_data rt_data;
+
+ /* SPQ */
+ struct ecore_spq *p_spq;
+
+ /* EQ */
+ struct ecore_eq *p_eq;
+
+ /* Consolidate Q */
+ struct ecore_consq *p_consq;
+
+ /* Slow-Path definitions */
+ osal_dpc_t sp_dpc;
+ bool b_sp_dpc_enabled;
+
+ struct ecore_ptt *p_main_ptt;
+ struct ecore_ptt *p_dpc_ptt;
+
+ struct ecore_sb_sp_info *p_sp_sb;
+ struct ecore_sb_attn_info *p_sb_attn;
+
+ /* Protocol related */
+ struct ecore_ooo_info *p_ooo_info;
+ struct ecore_pf_params pf_params;
+
+ /* Array of sb_info of all status blocks */
+ struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
+
+ struct ecore_cxt_mngr *p_cxt_mngr;
+
+ /* Flag indicating whether interrupts are enabled or not */
+ bool b_int_enabled;
+ bool b_int_requested;
+
+ /* True if the driver requests for the link */
+ bool b_drv_link_init;
+
+ struct ecore_vf_iov *vf_iov_info;
+ struct ecore_pf_iov *pf_iov_info;
+ struct ecore_mcp_info *mcp_info;
+ struct ecore_dcbx_info *p_dcbx_info;
+
+ struct ecore_hw_cid_data *p_tx_cids;
+ struct ecore_hw_cid_data *p_rx_cids;
+
+ struct ecore_dmae_info dmae_info;
+
+ /* QM init */
+ struct ecore_qm_info qm_info;
+
+ /* Buffer for unzipping firmware data */
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ void *unzip_buf;
+#endif
+
+ struct dbg_tools_data dbg_info;
+
+ struct z_stream_s *stream;
+
+ /* PWM region specific data */
+ u32 dpi_size;
+ u32 dpi_count;
+	u32 dpi_start_offset; /* this is used to
+				 * calculate the
+				 * doorbell address
+				 */
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_mf_mode {
+ ECORE_MF_DEFAULT,
+ ECORE_MF_OVLAN,
+ ECORE_MF_NPAR,
+};
+#endif
+
+struct ecore_dev {
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+ void *dp_ctx;
+
+ u8 type;
+#define ECORE_DEV_TYPE_BB (0 << 0)
+#define ECORE_DEV_TYPE_AH (1 << 0)
+/* Translate type/revision combo into the proper conditions */
+#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
+#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && \
+ CHIP_REV_IS_A0(dev))
+#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && \
+ CHIP_REV_IS_B0(dev))
+#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
+#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+#define ECORE_GET_TYPE(dev) (ECORE_IS_BB_A0(dev) ? CHIP_BB_A0 : \
+ ECORE_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
+
+ u16 vendor_id;
+ u16 device_id;
+
+ u16 chip_num;
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 16
+
+ u16 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 12
+#ifndef ASIC_ONLY
+#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
+#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
+#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
+#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+ CHIP_REV_IS_EMUL_B0(_p_dev))
+#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
+#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
+#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
+ CHIP_REV_IS_FPGA_B0(_p_dev))
+#define CHIP_REV_IS_SLOW(_p_dev) \
+ (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
+#define CHIP_REV_IS_A0(_p_dev) \
+ (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+ CHIP_REV_IS_FPGA_A0(_p_dev) || \
+ !(_p_dev)->chip_rev)
+#define CHIP_REV_IS_B0(_p_dev) \
+ (CHIP_REV_IS_EMUL_B0(_p_dev) || \
+ CHIP_REV_IS_FPGA_B0(_p_dev) || \
+ (_p_dev)->chip_rev == 1)
+#define CHIP_REV_IS_ASIC(_p_dev) (!CHIP_REV_IS_SLOW(_p_dev))
+#else
+#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
+#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
+#endif
+
+ u16 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 4
+
+ u16 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xf
+#define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports_in_engines;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+ enum ecore_mf_mode mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+
+ int pcie_width;
+ int pcie_speed;
+ u8 ver_str[VER_SIZE];
+ /* Add MF related configuration */
+ u8 mcp_rev;
+ u8 boot_mode;
+
+ u8 wol;
+
+ u32 int_mode;
+ enum ecore_coalescing_mode int_coalescing_mode;
+ u8 rx_coalesce_usecs;
+ u8 tx_coalesce_usecs;
+
+	/* Start BAR offset of the first hwfn */
+ void OSAL_IOMEM *regview;
+ void OSAL_IOMEM *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PCI */
+ u8 cache_shift;
+
+ /* Init */
+ const struct iro *iro_arr;
+#define IRO (p_hwfn->p_dev->iro_arr)
+
+ /* HW functions */
+ u8 num_hwfns;
+ struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+ /* SRIOV */
+ struct ecore_hw_sriov_info sriov_info;
+ unsigned long tunn_mode;
+#define IS_ECORE_SRIOV(edev) (!!((edev)->sriov_info.total_vfs))
+ bool b_is_vf;
+
+ u32 drv_type;
+
+ struct ecore_eth_stats *reset_stats;
+ struct ecore_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
+
+ /* Recovery */
+ bool recov_in_prog;
+
+#ifndef ASIC_ONLY
+ bool b_is_emul_full;
+#endif
+
+ void *firmware;
+
+ u64 fw_len;
+
+};
+
+#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
+ : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+ : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+ : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+ : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
+ : MAX_NUM_PFS_K2)
+
+#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
+ (ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
+ (ECORE_PATH_ID(p_hwfn) == 1) && \
+ ((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
+ (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
+ (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
+
+/**
+ * @brief ecore_concrete_to_sw_fid - get the sw function id from
+ * the concrete value.
+ *
+ * @param p_dev
+ * @param concrete_fid
+ *
+ * @return u8 - the sw function id
+ */
+static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
+ u32 concrete_fid)
+{
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
+ u8 sw_fid;
+
+ if (vf_valid)
+ sw_fid = vfid + MAX_NUM_PFS;
+ else
+ sw_fid = pfid;
+
+ return sw_fid;
+}
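
As a worked example of the decode above (using the PXP_CONCRETE_FID_* layout
from common_hsi.h): a concrete_fid of 0x0382 carries pfid 2 (bits 0-3),
vf_valid 1 (bit 7) and vfid 3 (bits 8-15), so the function returns
3 + MAX_NUM_PFS; with bit 7 clear it would return the pfid, 2.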
+
+#define PURE_LB_TC 8
+#define OOO_LB_TC 9
+
+static OSAL_INLINE u16 ecore_sriov_get_next_vf(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ u16 i;
+
+ for (i = rel_vf_id; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
+ if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, i))
+ return i;
+
+ return p_hwfn->p_dev->sriov_info.total_vfs;
+}
+
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ u32 min_pf_rate);
+
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+int ecore_device_num_engines(struct ecore_dev *p_dev);
+int ecore_device_num_ports(struct ecore_dev *p_dev);
+
+#define ecore_for_each_vf(_p_hwfn, _i) \
+ for (_i = ecore_sriov_get_next_vf(_p_hwfn, 0); \
+ _i < _p_hwfn->p_dev->sriov_info.total_vfs; \
+ _i = ecore_sriov_get_next_vf(_p_hwfn, _i + 1))
+
+#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+#endif /* __ECORE_H */
diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h
new file mode 100644
index 00000000..d8951bcc
--- /dev/null
+++ b/drivers/net/qede/base/ecore_attn_values.h
@@ -0,0 +1,13287 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ATTN_VALUES_H__
+#define __ATTN_VALUES_H__
+
+#ifndef __PREVENT_INT_ATTN__
+
+/* HW Attention register */
+struct attn_hw_reg {
+ u16 reg_idx; /* Index of this register in its block */
+ u16 num_of_bits; /* number of valid attention bits */
+ const u16 *bit_attn_idx; /* attention index per valid bit */
+ u32 sts_addr; /* Address of the STS register */
+ u32 sts_clr_addr; /* Address of the STS_CLR register */
+ u32 sts_wr_addr; /* Address of the STS_WR register */
+ u32 mask_addr; /* Address of the MASK register */
+};
+
+/* HW block attention registers */
+struct attn_hw_regs {
+ u16 num_of_int_regs; /* Number of interrupt regs */
+ u16 num_of_prty_regs; /* Number of parity regs */
+ struct attn_hw_reg **int_regs; /* interrupt regs */
+ struct attn_hw_reg **prty_regs; /* parity regs */
+};
+
+/* HW block attention registers and descriptions */
+struct attn_hw_block {
+ const char *name; /* Block name */
+ const char **int_desc; /* Array of interrupt attention descriptions */
+ const char **prty_desc; /* Array of parity attention descriptions */
+	struct attn_hw_regs chip_regs[3]; /* attention regs per chip */
+};
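
Each block exposes per-chip arrays of interrupt and parity registers; a
handler walks a register, reads its STS address, and maps each set bit
through bit_attn_idx into the description array. A hedged sketch of that
traversal (reg_rd32 is a hypothetical register-read callback, not a qede
API):

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;
typedef uint32_t u32;

struct attn_hw_reg {
	u16 reg_idx;
	u16 num_of_bits;
	const u16 *bit_attn_idx;
	u32 sts_addr, sts_clr_addr, sts_wr_addr, mask_addr;
};

/* Hypothetical register read supplied by the caller. */
typedef u32 (*reg_rd32_t)(u32 addr);

static void report_attentions(const struct attn_hw_reg *reg,
			      const char **desc, reg_rd32_t reg_rd32)
{
	u32 sts = reg_rd32(reg->sts_addr);
	u16 bit;

	/* Map each asserted status bit to its attention description. */
	for (bit = 0; bit < reg->num_of_bits; bit++)
		if (sts & (1u << bit))
			printf("attention: %s\n",
			       desc[reg->bit_attn_idx[bit]]);
}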
+
+#ifdef ATTN_DESC
+static const char *grc_int_attn_desc[5] = {
+ "grc_address_error",
+ "grc_timeout_event",
+ "grc_global_reserved_address",
+ "grc_path_isolation_error",
+ "grc_trace_fifo_valid_data",
+};
+#else
+#define grc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 grc_int0_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg grc_int0_bb_a0 = {
+ 0, 4, grc_int0_bb_a0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_bb_a0_regs[1] = {
+ &grc_int0_bb_a0,
+};
+
+static const u16 grc_int0_bb_b0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg grc_int0_bb_b0 = {
+ 0, 4, grc_int0_bb_b0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
+ &grc_int0_bb_b0,
+};
+
+static const u16 grc_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg grc_int0_k2 = {
+ 0, 5, grc_int0_k2_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_k2_regs[1] = {
+ &grc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *grc_prty_attn_desc[3] = {
+ "grc_mem003_i_mem_prty",
+ "grc_mem002_i_mem_prty",
+ "grc_mem001_i_mem_prty",
+};
+#else
+#define grc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 grc_prty1_bb_a0_attn_idx[2] = {
+ 1, 2,
+};
+
+static struct attn_hw_reg grc_prty1_bb_a0 = {
+ 0, 2, grc_prty1_bb_a0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_bb_a0_regs[1] = {
+ &grc_prty1_bb_a0,
+};
+
+static const u16 grc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg grc_prty1_bb_b0 = {
+ 0, 2, grc_prty1_bb_b0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
+ &grc_prty1_bb_b0,
+};
+
+static const u16 grc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg grc_prty1_k2 = {
+ 0, 2, grc_prty1_k2_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_k2_regs[1] = {
+ &grc_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *miscs_int_attn_desc[14] = {
+ "miscs_address_error",
+ "miscs_generic_sw",
+ "miscs_cnig_interrupt",
+ "miscs_opte_dorq_fifo_err_eng1",
+ "miscs_opte_dorq_fifo_err_eng0",
+ "miscs_opte_dbg_fifo_err_eng1",
+ "miscs_opte_dbg_fifo_err_eng0",
+ "miscs_opte_btb_if1_fifo_err_eng1",
+ "miscs_opte_btb_if1_fifo_err_eng0",
+ "miscs_opte_btb_if0_fifo_err_eng1",
+ "miscs_opte_btb_if0_fifo_err_eng0",
+ "miscs_opte_btb_sop_fifo_err_eng1",
+ "miscs_opte_btb_sop_fifo_err_eng0",
+ "miscs_opte_storm_fifo_err_eng0",
+};
+#else
+#define miscs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 miscs_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg miscs_int0_bb_a0 = {
+ 0, 2, miscs_int0_bb_a0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static const u16 miscs_int1_bb_a0_attn_idx[11] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg miscs_int1_bb_a0 = {
+ 1, 11, miscs_int1_bb_a0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194
+};
+
+static struct attn_hw_reg *miscs_int_bb_a0_regs[2] = {
+ &miscs_int0_bb_a0, &miscs_int1_bb_a0,
+};
+
+static const u16 miscs_int0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg miscs_int0_bb_b0 = {
+ 0, 3, miscs_int0_bb_b0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static const u16 miscs_int1_bb_b0_attn_idx[11] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg miscs_int1_bb_b0 = {
+ 1, 11, miscs_int1_bb_b0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194
+};
+
+static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
+ &miscs_int0_bb_b0, &miscs_int1_bb_b0,
+};
+
+static const u16 miscs_int0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg miscs_int0_k2 = {
+ 0, 3, miscs_int0_k2_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static struct attn_hw_reg *miscs_int_k2_regs[1] = {
+ &miscs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *miscs_prty_attn_desc[1] = {
+ "miscs_cnig_parity",
+};
+#else
+#define miscs_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 miscs_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg miscs_prty0_bb_b0 = {
+ 0, 1, miscs_prty0_bb_b0_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4
+};
+
+static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
+ &miscs_prty0_bb_b0,
+};
+
+static const u16 miscs_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg miscs_prty0_k2 = {
+ 0, 1, miscs_prty0_k2_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4
+};
+
+static struct attn_hw_reg *miscs_prty_k2_regs[1] = {
+ &miscs_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *misc_int_attn_desc[1] = {
+ "misc_address_error",
+};
+#else
+#define misc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 misc_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_bb_a0 = {
+ 0, 1, misc_int0_bb_a0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_bb_a0_regs[1] = {
+ &misc_int0_bb_a0,
+};
+
+static const u16 misc_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_bb_b0 = {
+ 0, 1, misc_int0_bb_b0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
+ &misc_int0_bb_b0,
+};
+
+static const u16 misc_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg misc_int0_k2 = {
+ 0, 1, misc_int0_k2_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_k2_regs[1] = {
+ &misc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglue_b_int_attn_desc[24] = {
+ "pglue_b_address_error",
+ "pglue_b_incorrect_rcv_behavior",
+ "pglue_b_was_error_attn",
+ "pglue_b_vf_length_violation_attn",
+ "pglue_b_vf_grc_space_violation_attn",
+ "pglue_b_tcpl_error_attn",
+ "pglue_b_tcpl_in_two_rcbs_attn",
+ "pglue_b_cssnoop_fifo_overflow",
+ "pglue_b_tcpl_translation_size_different",
+ "pglue_b_pcie_rx_l0s_timeout",
+ "pglue_b_master_zlr_attn",
+ "pglue_b_admin_window_violation_attn",
+ "pglue_b_out_of_range_function_in_pretend",
+ "pglue_b_illegal_address",
+ "pglue_b_pgl_cpl_err",
+ "pglue_b_pgl_txw_of",
+ "pglue_b_pgl_cpl_aft",
+ "pglue_b_pgl_cpl_of",
+ "pglue_b_pgl_cpl_ecrc",
+ "pglue_b_pgl_pcie_attn",
+ "pglue_b_pgl_read_blocked",
+ "pglue_b_pgl_write_blocked",
+ "pglue_b_vf_ilt_err",
+ "pglue_b_rxobffexception_attn",
+};
+#else
+#define pglue_b_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglue_b_int0_bb_a0_attn_idx[23] = {
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+	20, 21, 22,
+};
+
+static struct attn_hw_reg pglue_b_int0_bb_a0 = {
+ 0, 23, pglue_b_int0_bb_a0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188,
+ 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_bb_a0_regs[1] = {
+ &pglue_b_int0_bb_a0,
+};
+
+static const u16 pglue_b_int0_bb_b0_attn_idx[23] = {
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+	20, 21, 22,
+};
+
+static struct attn_hw_reg pglue_b_int0_bb_b0 = {
+ 0, 23, pglue_b_int0_bb_b0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188,
+ 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
+ &pglue_b_int0_bb_b0,
+};
+
+static const u16 pglue_b_int0_k2_attn_idx[24] = {
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+	20, 21, 22, 23,
+};
+
+static struct attn_hw_reg pglue_b_int0_k2 = {
+ 0, 24, pglue_b_int0_k2_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_k2_regs[1] = {
+ &pglue_b_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglue_b_prty_attn_desc[35] = {
+ "pglue_b_datapath_registers",
+ "pglue_b_mem027_i_mem_prty",
+ "pglue_b_mem007_i_mem_prty",
+ "pglue_b_mem009_i_mem_prty",
+ "pglue_b_mem010_i_mem_prty",
+ "pglue_b_mem008_i_mem_prty",
+ "pglue_b_mem022_i_mem_prty",
+ "pglue_b_mem023_i_mem_prty",
+ "pglue_b_mem024_i_mem_prty",
+ "pglue_b_mem025_i_mem_prty",
+ "pglue_b_mem004_i_mem_prty",
+ "pglue_b_mem005_i_mem_prty",
+ "pglue_b_mem011_i_mem_prty",
+ "pglue_b_mem016_i_mem_prty",
+ "pglue_b_mem017_i_mem_prty",
+ "pglue_b_mem012_i_mem_prty",
+ "pglue_b_mem013_i_mem_prty",
+ "pglue_b_mem014_i_mem_prty",
+ "pglue_b_mem015_i_mem_prty",
+ "pglue_b_mem018_i_mem_prty",
+ "pglue_b_mem020_i_mem_prty",
+ "pglue_b_mem021_i_mem_prty",
+ "pglue_b_mem019_i_mem_prty",
+ "pglue_b_mem026_i_mem_prty",
+ "pglue_b_mem006_i_mem_prty",
+ "pglue_b_mem003_i_mem_prty",
+ "pglue_b_mem002_i_mem_prty_0",
+ "pglue_b_mem002_i_mem_prty_1",
+ "pglue_b_mem002_i_mem_prty_2",
+ "pglue_b_mem002_i_mem_prty_3",
+ "pglue_b_mem002_i_mem_prty_4",
+ "pglue_b_mem002_i_mem_prty_5",
+ "pglue_b_mem002_i_mem_prty_6",
+ "pglue_b_mem002_i_mem_prty_7",
+ "pglue_b_mem001_i_mem_prty",
+};
+#else
+#define pglue_b_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglue_b_prty1_bb_a0_attn_idx[22] = {
+ 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty1_bb_a0 = {
+ 0, 22, pglue_b_prty1_bb_a0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static struct attn_hw_reg *pglue_b_prty_bb_a0_regs[1] = {
+ &pglue_b_prty1_bb_a0,
+};
+
+static const u16 pglue_b_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
+ 0, 1, pglue_b_prty0_bb_b0_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198,
+ 0x2a8194
+};
+
+static const u16 pglue_b_prty1_bb_b0_attn_idx[22] = {
+ 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
+ 1, 22, pglue_b_prty1_bb_b0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
+ &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0,
+};
+
+static const u16 pglue_b_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglue_b_prty0_k2 = {
+ 0, 1, pglue_b_prty0_k2_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194
+};
+
+static const u16 pglue_b_prty1_k2_attn_idx[31] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+	21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pglue_b_prty1_k2 = {
+ 1, 31, pglue_b_prty1_k2_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+ 0x2a8204
+};
+
+static const u16 pglue_b_prty2_k2_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty2_k2 = {
+ 2, 3, pglue_b_prty2_k2_attn_idx, 0x2a8210, 0x2a821c, 0x2a8218, 0x2a8214
+};
+
+static struct attn_hw_reg *pglue_b_prty_k2_regs[3] = {
+ &pglue_b_prty0_k2, &pglue_b_prty1_k2, &pglue_b_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cnig_int_attn_desc[10] = {
+ "cnig_address_error",
+ "cnig_tx_illegal_sop_port0",
+ "cnig_tx_illegal_sop_port1",
+ "cnig_tx_illegal_sop_port2",
+ "cnig_tx_illegal_sop_port3",
+ "cnig_tdm_lane_0_bandwidth_exceed",
+ "cnig_tdm_lane_1_bandwidth_exceed",
+ "cnig_pmeg_intr",
+ "cnig_pmfc_intr",
+ "cnig_fifo_error",
+};
+#else
+#define cnig_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cnig_int0_bb_a0_attn_idx[4] = {
+ 0, 7, 8, 9,
+};
+
+static struct attn_hw_reg cnig_int0_bb_a0 = {
+ 0, 4, cnig_int0_bb_a0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec
+};
+
+static struct attn_hw_reg *cnig_int_bb_a0_regs[1] = {
+ &cnig_int0_bb_a0,
+};
+
+static const u16 cnig_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 3, 7, 8, 9,
+};
+
+static struct attn_hw_reg cnig_int0_bb_b0 = {
+ 0, 6, cnig_int0_bb_b0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec
+};
+
+static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
+ &cnig_int0_bb_b0,
+};
+
+static const u16 cnig_int0_k2_attn_idx[7] = {
+ 0, 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg cnig_int0_k2 = {
+ 0, 7, cnig_int0_k2_attn_idx, 0x218218, 0x218224, 0x218220, 0x21821c
+};
+
+static struct attn_hw_reg *cnig_int_k2_regs[1] = {
+ &cnig_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cnig_prty_attn_desc[3] = {
+ "cnig_unused_0",
+ "cnig_datapath_tx",
+ "cnig_datapath_rx",
+};
+#else
+#define cnig_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cnig_prty0_bb_b0_attn_idx[2] = {
+ 1, 2,
+};
+
+static struct attn_hw_reg cnig_prty0_bb_b0 = {
+ 0, 2, cnig_prty0_bb_b0_attn_idx, 0x218348, 0x218354, 0x218350, 0x21834c
+};
+
+static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
+ &cnig_prty0_bb_b0,
+};
+
+static const u16 cnig_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg cnig_prty0_k2 = {
+ 0, 1, cnig_prty0_k2_attn_idx, 0x21822c, 0x218238, 0x218234, 0x218230
+};
+
+static struct attn_hw_reg *cnig_prty_k2_regs[1] = {
+ &cnig_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cpmu_int_attn_desc[1] = {
+ "cpmu_address_error",
+};
+#else
+#define cpmu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cpmu_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_bb_a0 = {
+ 0, 1, cpmu_int0_bb_a0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_bb_a0_regs[1] = {
+ &cpmu_int0_bb_a0,
+};
+
+static const u16 cpmu_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_bb_b0 = {
+ 0, 1, cpmu_int0_bb_b0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
+ &cpmu_int0_bb_b0,
+};
+
+static const u16 cpmu_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg cpmu_int0_k2 = {
+ 0, 1, cpmu_int0_k2_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_k2_regs[1] = {
+ &cpmu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ncsi_int_attn_desc[1] = {
+ "ncsi_address_error",
+};
+#else
+#define ncsi_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ncsi_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_bb_a0 = {
+ 0, 1, ncsi_int0_bb_a0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_bb_a0_regs[1] = {
+ &ncsi_int0_bb_a0,
+};
+
+static const u16 ncsi_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_bb_b0 = {
+ 0, 1, ncsi_int0_bb_b0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
+ &ncsi_int0_bb_b0,
+};
+
+static const u16 ncsi_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_int0_k2 = {
+ 0, 1, ncsi_int0_k2_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_k2_regs[1] = {
+ &ncsi_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ncsi_prty_attn_desc[1] = {
+ "ncsi_mem002_i_mem_prty",
+};
+#else
+#define ncsi_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ncsi_prty1_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_bb_a0 = {
+ 0, 1, ncsi_prty1_bb_a0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_bb_a0_regs[1] = {
+ &ncsi_prty1_bb_a0,
+};
+
+static const u16 ncsi_prty1_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_bb_b0 = {
+ 0, 1, ncsi_prty1_bb_b0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
+ &ncsi_prty1_bb_b0,
+};
+
+static const u16 ncsi_prty1_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ncsi_prty1_k2 = {
+ 0, 1, ncsi_prty1_k2_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_k2_regs[1] = {
+ &ncsi_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *opte_prty_attn_desc[12] = {
+ "opte_mem009_i_mem_prty",
+ "opte_mem010_i_mem_prty",
+ "opte_mem005_i_mem_prty",
+ "opte_mem006_i_mem_prty",
+ "opte_mem007_i_mem_prty",
+ "opte_mem008_i_mem_prty",
+ "opte_mem001_i_mem_prty",
+ "opte_mem002_i_mem_prty",
+ "opte_mem003_i_mem_prty",
+ "opte_mem004_i_mem_prty",
+ "opte_mem011_i_mem_prty",
+ "opte_datapath_parity_error",
+};
+#else
+#define opte_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 opte_prty1_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_bb_a0 = {
+ 0, 11, opte_prty1_bb_a0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static struct attn_hw_reg *opte_prty_bb_a0_regs[1] = {
+ &opte_prty1_bb_a0,
+};
+
+static const u16 opte_prty1_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_bb_b0 = {
+ 0, 11, opte_prty1_bb_b0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static const u16 opte_prty0_bb_b0_attn_idx[1] = {
+ 11,
+};
+
+static struct attn_hw_reg opte_prty0_bb_b0 = {
+ 1, 1, opte_prty0_bb_b0_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c
+};
+
+static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
+ &opte_prty1_bb_b0, &opte_prty0_bb_b0,
+};
+
+static const u16 opte_prty1_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_k2 = {
+ 0, 11, opte_prty1_k2_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static const u16 opte_prty0_k2_attn_idx[1] = {
+ 11,
+};
+
+static struct attn_hw_reg opte_prty0_k2 = {
+ 1, 1, opte_prty0_k2_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c
+};
+
+static struct attn_hw_reg *opte_prty_k2_regs[2] = {
+ &opte_prty1_k2, &opte_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmb_int_attn_desc[297] = {
+ "bmb_address_error",
+ "bmb_rc_pkt0_rls_error",
+ "bmb_unused_0",
+ "bmb_rc_pkt0_protocol_error",
+ "bmb_rc_pkt1_rls_error",
+ "bmb_unused_1",
+ "bmb_rc_pkt1_protocol_error",
+ "bmb_rc_pkt2_rls_error",
+ "bmb_unused_2",
+ "bmb_rc_pkt2_protocol_error",
+ "bmb_rc_pkt3_rls_error",
+ "bmb_unused_3",
+ "bmb_rc_pkt3_protocol_error",
+ "bmb_rc_sop_req_tc_port_error",
+ "bmb_unused_4",
+ "bmb_wc0_protocol_error",
+ "bmb_wc1_protocol_error",
+ "bmb_wc2_protocol_error",
+ "bmb_wc3_protocol_error",
+ "bmb_unused_5",
+ "bmb_ll_blk_error",
+ "bmb_unused_6",
+ "bmb_mac0_fc_cnt_error",
+ "bmb_ll_arb_calc_error",
+ "bmb_wc0_inp_fifo_error",
+ "bmb_wc0_sop_fifo_error",
+ "bmb_wc0_len_fifo_error",
+ "bmb_wc0_queue_fifo_error",
+ "bmb_wc0_free_point_fifo_error",
+ "bmb_wc0_next_point_fifo_error",
+ "bmb_wc0_strt_fifo_error",
+ "bmb_wc0_second_dscr_fifo_error",
+ "bmb_wc0_pkt_avail_fifo_error",
+ "bmb_wc0_cos_cnt_fifo_error",
+ "bmb_wc0_notify_fifo_error",
+ "bmb_wc0_ll_req_fifo_error",
+ "bmb_wc0_ll_pa_cnt_error",
+ "bmb_wc0_bb_pa_cnt_error",
+ "bmb_wc1_inp_fifo_error",
+ "bmb_wc1_sop_fifo_error",
+ "bmb_wc1_queue_fifo_error",
+ "bmb_wc1_free_point_fifo_error",
+ "bmb_wc1_next_point_fifo_error",
+ "bmb_wc1_strt_fifo_error",
+ "bmb_wc1_second_dscr_fifo_error",
+ "bmb_wc1_pkt_avail_fifo_error",
+ "bmb_wc1_cos_cnt_fifo_error",
+ "bmb_wc1_notify_fifo_error",
+ "bmb_wc1_ll_req_fifo_error",
+ "bmb_wc1_ll_pa_cnt_error",
+ "bmb_wc1_bb_pa_cnt_error",
+ "bmb_wc2_inp_fifo_error",
+ "bmb_wc2_sop_fifo_error",
+ "bmb_wc2_queue_fifo_error",
+ "bmb_wc2_free_point_fifo_error",
+ "bmb_wc2_next_point_fifo_error",
+ "bmb_wc2_strt_fifo_error",
+ "bmb_wc2_second_dscr_fifo_error",
+ "bmb_wc2_pkt_avail_fifo_error",
+ "bmb_wc2_cos_cnt_fifo_error",
+ "bmb_wc2_notify_fifo_error",
+ "bmb_wc2_ll_req_fifo_error",
+ "bmb_wc2_ll_pa_cnt_error",
+ "bmb_wc2_bb_pa_cnt_error",
+ "bmb_wc3_inp_fifo_error",
+ "bmb_wc3_sop_fifo_error",
+ "bmb_wc3_queue_fifo_error",
+ "bmb_wc3_free_point_fifo_error",
+ "bmb_wc3_next_point_fifo_error",
+ "bmb_wc3_strt_fifo_error",
+ "bmb_wc3_second_dscr_fifo_error",
+ "bmb_wc3_pkt_avail_fifo_error",
+ "bmb_wc3_cos_cnt_fifo_error",
+ "bmb_wc3_notify_fifo_error",
+ "bmb_wc3_ll_req_fifo_error",
+ "bmb_wc3_ll_pa_cnt_error",
+ "bmb_wc3_bb_pa_cnt_error",
+ "bmb_rc_pkt0_side_fifo_error",
+ "bmb_rc_pkt0_req_fifo_error",
+ "bmb_rc_pkt0_blk_fifo_error",
+ "bmb_rc_pkt0_rls_left_fifo_error",
+ "bmb_rc_pkt0_strt_ptr_fifo_error",
+ "bmb_rc_pkt0_second_ptr_fifo_error",
+ "bmb_rc_pkt0_rsp_fifo_error",
+ "bmb_rc_pkt0_dscr_fifo_error",
+ "bmb_rc_pkt1_side_fifo_error",
+ "bmb_rc_pkt1_req_fifo_error",
+ "bmb_rc_pkt1_blk_fifo_error",
+ "bmb_rc_pkt1_rls_left_fifo_error",
+ "bmb_rc_pkt1_strt_ptr_fifo_error",
+ "bmb_rc_pkt1_second_ptr_fifo_error",
+ "bmb_rc_pkt1_rsp_fifo_error",
+ "bmb_rc_pkt1_dscr_fifo_error",
+ "bmb_rc_pkt2_side_fifo_error",
+ "bmb_rc_pkt2_req_fifo_error",
+ "bmb_rc_pkt2_blk_fifo_error",
+ "bmb_rc_pkt2_rls_left_fifo_error",
+ "bmb_rc_pkt2_strt_ptr_fifo_error",
+ "bmb_rc_pkt2_second_ptr_fifo_error",
+ "bmb_rc_pkt2_rsp_fifo_error",
+ "bmb_rc_pkt2_dscr_fifo_error",
+ "bmb_rc_pkt3_side_fifo_error",
+ "bmb_rc_pkt3_req_fifo_error",
+ "bmb_rc_pkt3_blk_fifo_error",
+ "bmb_rc_pkt3_rls_left_fifo_error",
+ "bmb_rc_pkt3_strt_ptr_fifo_error",
+ "bmb_rc_pkt3_second_ptr_fifo_error",
+ "bmb_rc_pkt3_rsp_fifo_error",
+ "bmb_rc_pkt3_dscr_fifo_error",
+ "bmb_rc_sop_strt_fifo_error",
+ "bmb_rc_sop_req_fifo_error",
+ "bmb_rc_sop_dscr_fifo_error",
+ "bmb_rc_sop_queue_fifo_error",
+ "bmb_ll_arb_rls_fifo_error",
+ "bmb_ll_arb_prefetch_fifo_error",
+ "bmb_rc_pkt0_rls_fifo_error",
+ "bmb_rc_pkt1_rls_fifo_error",
+ "bmb_rc_pkt2_rls_fifo_error",
+ "bmb_rc_pkt3_rls_fifo_error",
+ "bmb_rc_pkt4_rls_fifo_error",
+ "bmb_rc_pkt5_rls_fifo_error",
+ "bmb_rc_pkt6_rls_fifo_error",
+ "bmb_rc_pkt7_rls_fifo_error",
+ "bmb_rc_pkt8_rls_fifo_error",
+ "bmb_rc_pkt9_rls_fifo_error",
+ "bmb_rc_pkt4_rls_error",
+ "bmb_rc_pkt4_protocol_error",
+ "bmb_rc_pkt4_side_fifo_error",
+ "bmb_rc_pkt4_req_fifo_error",
+ "bmb_rc_pkt4_blk_fifo_error",
+ "bmb_rc_pkt4_rls_left_fifo_error",
+ "bmb_rc_pkt4_strt_ptr_fifo_error",
+ "bmb_rc_pkt4_second_ptr_fifo_error",
+ "bmb_rc_pkt4_rsp_fifo_error",
+ "bmb_rc_pkt4_dscr_fifo_error",
+ "bmb_rc_pkt5_rls_error",
+ "bmb_rc_pkt5_protocol_error",
+ "bmb_rc_pkt5_side_fifo_error",
+ "bmb_rc_pkt5_req_fifo_error",
+ "bmb_rc_pkt5_blk_fifo_error",
+ "bmb_rc_pkt5_rls_left_fifo_error",
+ "bmb_rc_pkt5_strt_ptr_fifo_error",
+ "bmb_rc_pkt5_second_ptr_fifo_error",
+ "bmb_rc_pkt5_rsp_fifo_error",
+ "bmb_rc_pkt5_dscr_fifo_error",
+ "bmb_rc_pkt6_rls_error",
+ "bmb_rc_pkt6_protocol_error",
+ "bmb_rc_pkt6_side_fifo_error",
+ "bmb_rc_pkt6_req_fifo_error",
+ "bmb_rc_pkt6_blk_fifo_error",
+ "bmb_rc_pkt6_rls_left_fifo_error",
+ "bmb_rc_pkt6_strt_ptr_fifo_error",
+ "bmb_rc_pkt6_second_ptr_fifo_error",
+ "bmb_rc_pkt6_rsp_fifo_error",
+ "bmb_rc_pkt6_dscr_fifo_error",
+ "bmb_rc_pkt7_rls_error",
+ "bmb_rc_pkt7_protocol_error",
+ "bmb_rc_pkt7_side_fifo_error",
+ "bmb_rc_pkt7_req_fifo_error",
+ "bmb_rc_pkt7_blk_fifo_error",
+ "bmb_rc_pkt7_rls_left_fifo_error",
+ "bmb_rc_pkt7_strt_ptr_fifo_error",
+ "bmb_rc_pkt7_second_ptr_fifo_error",
+ "bmb_rc_pkt7_rsp_fifo_error",
+ "bmb_packet_available_sync_fifo_push_error",
+ "bmb_rc_pkt8_rls_error",
+ "bmb_rc_pkt8_protocol_error",
+ "bmb_rc_pkt8_side_fifo_error",
+ "bmb_rc_pkt8_req_fifo_error",
+ "bmb_rc_pkt8_blk_fifo_error",
+ "bmb_rc_pkt8_rls_left_fifo_error",
+ "bmb_rc_pkt8_strt_ptr_fifo_error",
+ "bmb_rc_pkt8_second_ptr_fifo_error",
+ "bmb_rc_pkt8_rsp_fifo_error",
+ "bmb_rc_pkt8_dscr_fifo_error",
+ "bmb_rc_pkt9_rls_error",
+ "bmb_rc_pkt9_protocol_error",
+ "bmb_rc_pkt9_side_fifo_error",
+ "bmb_rc_pkt9_req_fifo_error",
+ "bmb_rc_pkt9_blk_fifo_error",
+ "bmb_rc_pkt9_rls_left_fifo_error",
+ "bmb_rc_pkt9_strt_ptr_fifo_error",
+ "bmb_rc_pkt9_second_ptr_fifo_error",
+ "bmb_rc_pkt9_rsp_fifo_error",
+ "bmb_rc_pkt9_dscr_fifo_error",
+ "bmb_wc4_protocol_error",
+ "bmb_wc5_protocol_error",
+ "bmb_wc6_protocol_error",
+ "bmb_wc7_protocol_error",
+ "bmb_wc8_protocol_error",
+ "bmb_wc9_protocol_error",
+ "bmb_wc4_inp_fifo_error",
+ "bmb_wc4_sop_fifo_error",
+ "bmb_wc4_queue_fifo_error",
+ "bmb_wc4_free_point_fifo_error",
+ "bmb_wc4_next_point_fifo_error",
+ "bmb_wc4_strt_fifo_error",
+ "bmb_wc4_second_dscr_fifo_error",
+ "bmb_wc4_pkt_avail_fifo_error",
+ "bmb_wc4_cos_cnt_fifo_error",
+ "bmb_wc4_notify_fifo_error",
+ "bmb_wc4_ll_req_fifo_error",
+ "bmb_wc4_ll_pa_cnt_error",
+ "bmb_wc4_bb_pa_cnt_error",
+ "bmb_wc5_inp_fifo_error",
+ "bmb_wc5_sop_fifo_error",
+ "bmb_wc5_queue_fifo_error",
+ "bmb_wc5_free_point_fifo_error",
+ "bmb_wc5_next_point_fifo_error",
+ "bmb_wc5_strt_fifo_error",
+ "bmb_wc5_second_dscr_fifo_error",
+ "bmb_wc5_pkt_avail_fifo_error",
+ "bmb_wc5_cos_cnt_fifo_error",
+ "bmb_wc5_notify_fifo_error",
+ "bmb_wc5_ll_req_fifo_error",
+ "bmb_wc5_ll_pa_cnt_error",
+ "bmb_wc5_bb_pa_cnt_error",
+ "bmb_wc6_inp_fifo_error",
+ "bmb_wc6_sop_fifo_error",
+ "bmb_wc6_queue_fifo_error",
+ "bmb_wc6_free_point_fifo_error",
+ "bmb_wc6_next_point_fifo_error",
+ "bmb_wc6_strt_fifo_error",
+ "bmb_wc6_second_dscr_fifo_error",
+ "bmb_wc6_pkt_avail_fifo_error",
+ "bmb_wc6_cos_cnt_fifo_error",
+ "bmb_wc6_notify_fifo_error",
+ "bmb_wc6_ll_req_fifo_error",
+ "bmb_wc6_ll_pa_cnt_error",
+ "bmb_wc6_bb_pa_cnt_error",
+ "bmb_wc7_inp_fifo_error",
+ "bmb_wc7_sop_fifo_error",
+ "bmb_wc7_queue_fifo_error",
+ "bmb_wc7_free_point_fifo_error",
+ "bmb_wc7_next_point_fifo_error",
+ "bmb_wc7_strt_fifo_error",
+ "bmb_wc7_second_dscr_fifo_error",
+ "bmb_wc7_pkt_avail_fifo_error",
+ "bmb_wc7_cos_cnt_fifo_error",
+ "bmb_wc7_notify_fifo_error",
+ "bmb_wc7_ll_req_fifo_error",
+ "bmb_wc7_ll_pa_cnt_error",
+ "bmb_wc7_bb_pa_cnt_error",
+ "bmb_wc8_inp_fifo_error",
+ "bmb_wc8_sop_fifo_error",
+ "bmb_wc8_queue_fifo_error",
+ "bmb_wc8_free_point_fifo_error",
+ "bmb_wc8_next_point_fifo_error",
+ "bmb_wc8_strt_fifo_error",
+ "bmb_wc8_second_dscr_fifo_error",
+ "bmb_wc8_pkt_avail_fifo_error",
+ "bmb_wc8_cos_cnt_fifo_error",
+ "bmb_wc8_notify_fifo_error",
+ "bmb_wc8_ll_req_fifo_error",
+ "bmb_wc8_ll_pa_cnt_error",
+ "bmb_wc8_bb_pa_cnt_error",
+ "bmb_wc9_inp_fifo_error",
+ "bmb_wc9_sop_fifo_error",
+ "bmb_wc9_queue_fifo_error",
+ "bmb_wc9_free_point_fifo_error",
+ "bmb_wc9_next_point_fifo_error",
+ "bmb_wc9_strt_fifo_error",
+ "bmb_wc9_second_dscr_fifo_error",
+ "bmb_wc9_pkt_avail_fifo_error",
+ "bmb_wc9_cos_cnt_fifo_error",
+ "bmb_wc9_notify_fifo_error",
+ "bmb_wc9_ll_req_fifo_error",
+ "bmb_wc9_ll_pa_cnt_error",
+ "bmb_wc9_bb_pa_cnt_error",
+ "bmb_rc9_sop_rc_out_sync_fifo_error",
+ "bmb_rc9_sop_out_sync_fifo_push_error",
+ "bmb_rc0_sop_pend_fifo_error",
+ "bmb_rc1_sop_pend_fifo_error",
+ "bmb_rc2_sop_pend_fifo_error",
+ "bmb_rc3_sop_pend_fifo_error",
+ "bmb_rc4_sop_pend_fifo_error",
+ "bmb_rc5_sop_pend_fifo_error",
+ "bmb_rc6_sop_pend_fifo_error",
+ "bmb_rc7_sop_pend_fifo_error",
+ "bmb_rc0_dscr_pend_fifo_error",
+ "bmb_rc1_dscr_pend_fifo_error",
+ "bmb_rc2_dscr_pend_fifo_error",
+ "bmb_rc3_dscr_pend_fifo_error",
+ "bmb_rc4_dscr_pend_fifo_error",
+ "bmb_rc5_dscr_pend_fifo_error",
+ "bmb_rc6_dscr_pend_fifo_error",
+ "bmb_rc7_dscr_pend_fifo_error",
+ "bmb_rc8_sop_inp_sync_fifo_push_error",
+ "bmb_rc9_sop_inp_sync_fifo_push_error",
+ "bmb_rc8_sop_out_sync_fifo_push_error",
+ "bmb_rc_gnt_pend_fifo_error",
+ "bmb_rc8_out_sync_fifo_push_error",
+ "bmb_rc9_out_sync_fifo_push_error",
+ "bmb_wc8_sync_fifo_push_error",
+ "bmb_wc9_sync_fifo_push_error",
+ "bmb_rc8_sop_rc_out_sync_fifo_error",
+ "bmb_rc_pkt7_dscr_fifo_error",
+};
+#else
+#define bmb_int_attn_desc OSAL_NULL
+#endif
+
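+/* Each *_attn_desc array in this file holds printable cause names and
+ * collapses to OSAL_NULL when ATTN_DESC is undefined.  The *_attn_idx
+ * arrays map the valid bits of one status register to entries in the
+ * matching *_attn_desc array.  Each attn_hw_reg initializer appears to
+ * pack { register index within the block, number of valid bits, the
+ * index array, and four GRC register addresses }; treating those four
+ * addresses as status/clear/write/mask is an assumption based on the
+ * attn_hw_reg definition, not something these tables state themselves.
+ */
+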
+static const u16 bmb_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_bb_a0 = {
+ 0, 16, bmb_int0_bb_a0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_bb_a0_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_bb_a0 = {
+ 1, 28, bmb_int1_bb_a0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_bb_a0_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_bb_a0 = {
+ 2, 26, bmb_int2_bb_a0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_bb_a0_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_bb_a0 = {
+ 3, 31, bmb_int3_bb_a0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_bb_a0_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_bb_a0 = {
+ 4, 27, bmb_int4_bb_a0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_bb_a0_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_bb_a0 = {
+ 5, 29, bmb_int5_bb_a0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_bb_a0_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
+};
+
+static struct attn_hw_reg bmb_int6_bb_a0 = {
+ 6, 30, bmb_int6_bb_a0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_bb_a0_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225,
+};
+
+static struct attn_hw_reg bmb_int7_bb_a0 = {
+ 7, 32, bmb_int7_bb_a0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_bb_a0_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257,
+};
+
+static struct attn_hw_reg bmb_int8_bb_a0 = {
+ 8, 32, bmb_int8_bb_a0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_bb_a0_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289,
+};
+
+static struct attn_hw_reg bmb_int9_bb_a0 = {
+ 9, 32, bmb_int9_bb_a0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_bb_a0_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_bb_a0 = {
+ 10, 3, bmb_int10_bb_a0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_bb_a0_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_bb_a0 = {
+ 11, 4, bmb_int11_bb_a0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_bb_a0_regs[12] = {
+ &bmb_int0_bb_a0, &bmb_int1_bb_a0, &bmb_int2_bb_a0, &bmb_int3_bb_a0,
+ &bmb_int4_bb_a0, &bmb_int5_bb_a0, &bmb_int6_bb_a0, &bmb_int7_bb_a0,
+ &bmb_int8_bb_a0, &bmb_int9_bb_a0,
+ &bmb_int10_bb_a0, &bmb_int11_bb_a0,
+};
+
+static const u16 bmb_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_bb_b0 = {
+ 0, 16, bmb_int0_bb_b0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_bb_b0_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_bb_b0 = {
+ 1, 28, bmb_int1_bb_b0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_bb_b0_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_bb_b0 = {
+ 2, 26, bmb_int2_bb_b0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_bb_b0_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_bb_b0 = {
+ 3, 31, bmb_int3_bb_b0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_bb_b0_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_bb_b0 = {
+ 4, 27, bmb_int4_bb_b0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_bb_b0_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_bb_b0 = {
+ 5, 29, bmb_int5_bb_b0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_bb_b0_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
+};
+
+static struct attn_hw_reg bmb_int6_bb_b0 = {
+ 6, 30, bmb_int6_bb_b0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_bb_b0_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225,
+};
+
+static struct attn_hw_reg bmb_int7_bb_b0 = {
+ 7, 32, bmb_int7_bb_b0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_bb_b0_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257,
+};
+
+static struct attn_hw_reg bmb_int8_bb_b0 = {
+ 8, 32, bmb_int8_bb_b0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_bb_b0_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289,
+};
+
+static struct attn_hw_reg bmb_int9_bb_b0 = {
+ 9, 32, bmb_int9_bb_b0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_bb_b0_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_bb_b0 = {
+ 10, 3, bmb_int10_bb_b0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_bb_b0_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_bb_b0 = {
+ 11, 4, bmb_int11_bb_b0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
+ &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
+ &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
+ &bmb_int8_bb_b0, &bmb_int9_bb_b0,
+ &bmb_int10_bb_b0, &bmb_int11_bb_b0,
+};
+
+static const u16 bmb_int0_k2_attn_idx[16] = {
+ 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_k2 = {
+ 0, 16, bmb_int0_k2_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_k2_attn_idx[28] = {
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_k2 = {
+ 1, 28, bmb_int1_k2_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_k2_attn_idx[26] = {
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_k2 = {
+ 2, 26, bmb_int2_k2_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_k2_attn_idx[31] = {
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_k2 = {
+ 3, 31, bmb_int3_k2_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_k2_attn_idx[27] = {
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_k2 = {
+ 4, 27, bmb_int4_k2_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_k2_attn_idx[29] = {
+ 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_k2 = {
+ 5, 29, bmb_int5_k2_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_k2_attn_idx[30] = {
+ 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193,
+};
+
+static struct attn_hw_reg bmb_int6_k2 = {
+ 6, 30, bmb_int6_k2_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_k2_attn_idx[32] = {
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225,
+};
+
+static struct attn_hw_reg bmb_int7_k2 = {
+ 7, 32, bmb_int7_k2_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_k2_attn_idx[32] = {
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257,
+};
+
+static struct attn_hw_reg bmb_int8_k2 = {
+ 8, 32, bmb_int8_k2_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_k2_attn_idx[32] = {
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289,
+};
+
+static struct attn_hw_reg bmb_int9_k2 = {
+ 9, 32, bmb_int9_k2_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_k2_attn_idx[3] = {
+ 290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_k2 = {
+ 10, 3, bmb_int10_k2_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_k2_attn_idx[4] = {
+ 293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_k2 = {
+ 11, 4, bmb_int11_k2_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_k2_regs[12] = {
+ &bmb_int0_k2, &bmb_int1_k2, &bmb_int2_k2, &bmb_int3_k2, &bmb_int4_k2,
+ &bmb_int5_k2, &bmb_int6_k2, &bmb_int7_k2, &bmb_int8_k2, &bmb_int9_k2,
+ &bmb_int10_k2, &bmb_int11_k2,
+};
+
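+/* The BMB "int" group above covers functional interrupt causes; the
+ * "prty" group that follows covers memory parity and ECC causes.  Both
+ * groups are replicated per chip variant (bb_a0, bb_b0, k2), which
+ * appear to expose slightly different subsets of causes.
+ */
+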
+#ifdef ATTN_DESC
+static const char *bmb_prty_attn_desc[61] = {
+ "bmb_ll_bank0_mem_prty",
+ "bmb_ll_bank1_mem_prty",
+ "bmb_ll_bank2_mem_prty",
+ "bmb_ll_bank3_mem_prty",
+ "bmb_datapath_registers",
+ "bmb_mem001_i_ecc_rf_int",
+ "bmb_mem008_i_ecc_rf_int",
+ "bmb_mem009_i_ecc_rf_int",
+ "bmb_mem010_i_ecc_rf_int",
+ "bmb_mem011_i_ecc_rf_int",
+ "bmb_mem012_i_ecc_rf_int",
+ "bmb_mem013_i_ecc_rf_int",
+ "bmb_mem014_i_ecc_rf_int",
+ "bmb_mem015_i_ecc_rf_int",
+ "bmb_mem016_i_ecc_rf_int",
+ "bmb_mem002_i_ecc_rf_int",
+ "bmb_mem003_i_ecc_rf_int",
+ "bmb_mem004_i_ecc_rf_int",
+ "bmb_mem005_i_ecc_rf_int",
+ "bmb_mem006_i_ecc_rf_int",
+ "bmb_mem007_i_ecc_rf_int",
+ "bmb_mem059_i_mem_prty",
+ "bmb_mem060_i_mem_prty",
+ "bmb_mem037_i_mem_prty",
+ "bmb_mem038_i_mem_prty",
+ "bmb_mem039_i_mem_prty",
+ "bmb_mem040_i_mem_prty",
+ "bmb_mem041_i_mem_prty",
+ "bmb_mem042_i_mem_prty",
+ "bmb_mem043_i_mem_prty",
+ "bmb_mem044_i_mem_prty",
+ "bmb_mem045_i_mem_prty",
+ "bmb_mem046_i_mem_prty",
+ "bmb_mem047_i_mem_prty",
+ "bmb_mem048_i_mem_prty",
+ "bmb_mem049_i_mem_prty",
+ "bmb_mem050_i_mem_prty",
+ "bmb_mem051_i_mem_prty",
+ "bmb_mem052_i_mem_prty",
+ "bmb_mem053_i_mem_prty",
+ "bmb_mem054_i_mem_prty",
+ "bmb_mem055_i_mem_prty",
+ "bmb_mem056_i_mem_prty",
+ "bmb_mem057_i_mem_prty",
+ "bmb_mem058_i_mem_prty",
+ "bmb_mem033_i_mem_prty",
+ "bmb_mem034_i_mem_prty",
+ "bmb_mem035_i_mem_prty",
+ "bmb_mem036_i_mem_prty",
+ "bmb_mem021_i_mem_prty",
+ "bmb_mem022_i_mem_prty",
+ "bmb_mem023_i_mem_prty",
+ "bmb_mem024_i_mem_prty",
+ "bmb_mem025_i_mem_prty",
+ "bmb_mem026_i_mem_prty",
+ "bmb_mem027_i_mem_prty",
+ "bmb_mem028_i_mem_prty",
+ "bmb_mem029_i_mem_prty",
+ "bmb_mem030_i_mem_prty",
+ "bmb_mem031_i_mem_prty",
+ "bmb_mem032_i_mem_prty",
+};
+#else
+#define bmb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmb_prty1_bb_a0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_bb_a0 = {
+ 0, 31, bmb_prty1_bb_a0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_bb_a0_attn_idx[25] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60,
+};
+
+static struct attn_hw_reg bmb_prty2_bb_a0 = {
+ 1, 25, bmb_prty2_bb_a0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_bb_a0_regs[2] = {
+ &bmb_prty1_bb_a0, &bmb_prty2_bb_a0,
+};
+
+static const u16 bmb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg bmb_prty0_bb_b0 = {
+ 0, 5, bmb_prty0_bb_b0_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0
+};
+
+static const u16 bmb_prty1_bb_b0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_bb_b0 = {
+ 1, 31, bmb_prty1_bb_b0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_bb_b0_attn_idx[15] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_prty2_bb_b0 = {
+ 2, 15, bmb_prty2_bb_b0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
+ &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0,
+};
+
+static const u16 bmb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg bmb_prty0_k2 = {
+ 0, 5, bmb_prty0_k2_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0
+};
+
+static const u16 bmb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_k2 = {
+ 1, 31, bmb_prty1_k2_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_k2_attn_idx[15] = {
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_prty2_k2 = {
+ 2, 15, bmb_prty2_k2_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_k2_regs[3] = {
+ &bmb_prty0_k2, &bmb_prty1_k2, &bmb_prty2_k2,
+};
+
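+/* A minimal sketch of how one of these groups could be walked: read the
+ * status register, then print the cause name of every set valid bit via
+ * bit_attn_idx, e.g. attn_report_group(p_hwfn, p_ptt, bmb_int_k2_regs,
+ * 12, bmb_int_attn_desc).  Kept under #if 0 because the real handler
+ * lives elsewhere in ecore; the attn_hw_reg field names and the
+ * ecore_rd()/DP_NOTICE() helpers used here are assumptions about the
+ * surrounding driver, not part of the generated tables.
+ */
+#if 0
+static void attn_report_group(struct ecore_hwfn *p_hwfn,
+			      struct ecore_ptt *p_ptt,
+			      struct attn_hw_reg **regs, int num_regs,
+			      const char **desc)
+{
+	int r, bit;
+
+	for (r = 0; r < num_regs; r++) {
+		/* Latched attention status for this register */
+		u32 sts = ecore_rd(p_hwfn, p_ptt, regs[r]->sts_addr);
+
+		for (bit = 0; bit < regs[r]->num_of_bits; bit++) {
+			if (!(sts & (1U << bit)))
+				continue;
+			/* bit_attn_idx maps bit position -> desc entry */
+			if (desc != OSAL_NULL)
+				DP_NOTICE(p_hwfn, false, "attn: %s\n",
+					  desc[regs[r]->bit_attn_idx[bit]]);
+		}
+	}
+}
+#endif
+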
+#ifdef ATTN_DESC
+static const char *pcie_int_attn_desc[17] = {
+ "pcie_address_error",
+ "pcie_link_down_detect",
+ "pcie_link_up_detect",
+ "pcie_cfg_link_eq_req_int",
+ "pcie_pcie_bandwidth_change_detect",
+ "pcie_early_hot_reset_detect",
+ "pcie_hot_reset_detect",
+ "pcie_l1_entry_detect",
+ "pcie_l1_exit_detect",
+ "pcie_ltssm_state_match_detect",
+ "pcie_fc_timeout_detect",
+ "pcie_pme_turnoff_message_detect",
+ "pcie_cfg_send_cor_err",
+ "pcie_cfg_send_nf_err",
+ "pcie_cfg_send_f_err",
+ "pcie_qoverflow_detect",
+ "pcie_vdm_detect",
+};
+#else
+#define pcie_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcie_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg pcie_int0_k2 = {
+ 0, 17, pcie_int0_k2_attn_idx, 0x547a0, 0x547ac, 0x547a8, 0x547a4
+};
+
+static struct attn_hw_reg *pcie_int_k2_regs[1] = {
+ &pcie_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcie_prty_attn_desc[24] = {
+ "pcie_mem003_i_ecc_rf_int",
+ "pcie_mem004_i_ecc_rf_int",
+ "pcie_mem008_i_mem_prty",
+ "pcie_mem007_i_mem_prty",
+ "pcie_mem005_i_mem_prty",
+ "pcie_mem006_i_mem_prty",
+ "pcie_mem001_i_mem_prty",
+ "pcie_mem002_i_mem_prty",
+ "pcie_mem001_i_ecc_rf_int",
+ "pcie_mem005_i_ecc_rf_int",
+ "pcie_mem010_i_ecc_rf_int",
+ "pcie_mem009_i_ecc_rf_int",
+ "pcie_mem007_i_ecc_rf_int",
+ "pcie_mem004_i_mem_prty_0",
+ "pcie_mem004_i_mem_prty_1",
+ "pcie_mem004_i_mem_prty_2",
+ "pcie_mem004_i_mem_prty_3",
+ "pcie_mem011_i_mem_prty_1",
+ "pcie_mem011_i_mem_prty_2",
+ "pcie_mem012_i_mem_prty_1",
+ "pcie_mem012_i_mem_prty_2",
+ "pcie_app_parity_errs_0",
+ "pcie_app_parity_errs_1",
+ "pcie_app_parity_errs_2",
+};
+#else
+#define pcie_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcie_prty1_bb_a0_attn_idx[17] = {
+ 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pcie_prty1_bb_a0 = {
+ 0, 17, pcie_prty1_bb_a0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static struct attn_hw_reg *pcie_prty_bb_a0_regs[1] = {
+ &pcie_prty1_bb_a0,
+};
+
+static const u16 pcie_prty1_bb_b0_attn_idx[17] = {
+ 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pcie_prty1_bb_b0 = {
+ 0, 17, pcie_prty1_bb_b0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
+ &pcie_prty1_bb_b0,
+};
+
+static const u16 pcie_prty1_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg pcie_prty1_k2 = {
+ 0, 8, pcie_prty1_k2_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static const u16 pcie_prty0_k2_attn_idx[3] = {
+ 21, 22, 23,
+};
+
+static struct attn_hw_reg pcie_prty0_k2 = {
+ 1, 3, pcie_prty0_k2_attn_idx, 0x547b0, 0x547bc, 0x547b8, 0x547b4
+};
+
+static struct attn_hw_reg *pcie_prty_k2_regs[2] = {
+ &pcie_prty1_k2, &pcie_prty0_k2,
+};
+
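+/* Worth noting in the PCIe tables above: the "int" causes are listed
+ * for k2 only, and while bb exposes its parity causes through a single
+ * register, k2 splits them across two and adds the pcie_app_parity_errs
+ * causes.
+ */
+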
+#ifdef ATTN_DESC
+static const char *mcp2_prty_attn_desc[13] = {
+ "mcp2_rom_parity",
+ "mcp2_mem001_i_ecc_rf_int",
+ "mcp2_mem006_i_ecc_0_rf_int",
+ "mcp2_mem006_i_ecc_1_rf_int",
+ "mcp2_mem006_i_ecc_2_rf_int",
+ "mcp2_mem006_i_ecc_3_rf_int",
+ "mcp2_mem007_i_ecc_rf_int",
+ "mcp2_mem004_i_mem_prty",
+ "mcp2_mem003_i_mem_prty",
+ "mcp2_mem002_i_mem_prty",
+ "mcp2_mem009_i_mem_prty",
+ "mcp2_mem008_i_mem_prty",
+ "mcp2_mem005_i_mem_prty",
+};
+#else
+#define mcp2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcp2_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_bb_a0 = {
+ 0, 1, mcp2_prty0_bb_a0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_bb_a0_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_bb_a0 = {
+ 1, 12, mcp2_prty1_bb_a0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_bb_a0_regs[2] = {
+ &mcp2_prty0_bb_a0, &mcp2_prty1_bb_a0,
+};
+
+static const u16 mcp2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_bb_b0 = {
+ 0, 1, mcp2_prty0_bb_b0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_bb_b0_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_bb_b0 = {
+ 1, 12, mcp2_prty1_bb_b0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
+ &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0,
+};
+
+static const u16 mcp2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg mcp2_prty0_k2 = {
+ 0, 1, mcp2_prty0_k2_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_k2_attn_idx[12] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_k2 = {
+ 1, 12, mcp2_prty1_k2_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_k2_regs[2] = {
+ &mcp2_prty0_k2, &mcp2_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst_int_attn_desc[18] = {
+ "pswhst_address_error",
+ "pswhst_hst_src_fifo1_err",
+ "pswhst_hst_src_fifo2_err",
+ "pswhst_hst_src_fifo3_err",
+ "pswhst_hst_src_fifo4_err",
+ "pswhst_hst_src_fifo5_err",
+ "pswhst_hst_hdr_sync_fifo_err",
+ "pswhst_hst_data_sync_fifo_err",
+ "pswhst_hst_cpl_sync_fifo_err",
+ "pswhst_hst_vf_disabled_access",
+ "pswhst_hst_permission_violation",
+ "pswhst_hst_incorrect_access",
+ "pswhst_hst_src_fifo6_err",
+ "pswhst_hst_src_fifo7_err",
+ "pswhst_hst_src_fifo8_err",
+ "pswhst_hst_src_fifo9_err",
+ "pswhst_hst_source_credit_violation",
+ "pswhst_hst_timeout",
+};
+#else
+#define pswhst_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst_int0_bb_a0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_bb_a0 = {
+ 0, 18, pswhst_int0_bb_a0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188,
+ 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_bb_a0_regs[1] = {
+ &pswhst_int0_bb_a0,
+};
+
+static const u16 pswhst_int0_bb_b0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_bb_b0 = {
+ 0, 18, pswhst_int0_bb_b0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188,
+ 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
+ &pswhst_int0_bb_b0,
+};
+
+static const u16 pswhst_int0_k2_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_k2 = {
+ 0, 18, pswhst_int0_k2_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_k2_regs[1] = {
+ &pswhst_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst_prty_attn_desc[18] = {
+ "pswhst_datapath_registers",
+ "pswhst_mem006_i_mem_prty",
+ "pswhst_mem007_i_mem_prty",
+ "pswhst_mem005_i_mem_prty",
+ "pswhst_mem002_i_mem_prty",
+ "pswhst_mem003_i_mem_prty",
+ "pswhst_mem001_i_mem_prty",
+ "pswhst_mem008_i_mem_prty",
+ "pswhst_mem004_i_mem_prty",
+ "pswhst_mem009_i_mem_prty",
+ "pswhst_mem010_i_mem_prty",
+ "pswhst_mem016_i_mem_prty",
+ "pswhst_mem012_i_mem_prty",
+ "pswhst_mem013_i_mem_prty",
+ "pswhst_mem014_i_mem_prty",
+ "pswhst_mem015_i_mem_prty",
+ "pswhst_mem011_i_mem_prty",
+ "pswhst_mem017_i_mem_prty",
+};
+#else
+#define pswhst_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst_prty1_bb_a0_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_bb_a0 = {
+ 0, 17, pswhst_prty1_bb_a0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208,
+ 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_bb_a0_regs[1] = {
+ &pswhst_prty1_bb_a0,
+};
+
+static const u16 pswhst_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst_prty0_bb_b0 = {
+ 0, 1, pswhst_prty0_bb_b0_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198,
+ 0x2a0194
+};
+
+static const u16 pswhst_prty1_bb_b0_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_bb_b0 = {
+ 1, 17, pswhst_prty1_bb_b0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208,
+ 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
+ &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0,
+};
+
+static const u16 pswhst_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst_prty0_k2 = {
+ 0, 1, pswhst_prty0_k2_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194
+};
+
+static const u16 pswhst_prty1_k2_attn_idx[17] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_k2 = {
+ 1, 17, pswhst_prty1_k2_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_k2_regs[2] = {
+ &pswhst_prty0_k2, &pswhst_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst2_int_attn_desc[5] = {
+ "pswhst2_address_error",
+ "pswhst2_hst_header_fifo_err",
+ "pswhst2_hst_data_fifo_err",
+ "pswhst2_hst_cpl_fifo_err",
+ "pswhst2_hst_ireq_fifo_err",
+};
+#else
+#define pswhst2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst2_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_bb_a0 = {
+ 0, 5, pswhst2_int0_bb_a0_attn_idx, 0x29e180, 0x29e18c, 0x29e188,
+ 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_bb_a0_regs[1] = {
+ &pswhst2_int0_bb_a0,
+};
+
+static const u16 pswhst2_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_bb_b0 = {
+ 0, 5, pswhst2_int0_bb_b0_attn_idx, 0x29e180, 0x29e18c, 0x29e188,
+ 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
+ &pswhst2_int0_bb_b0,
+};
+
+static const u16 pswhst2_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_k2 = {
+ 0, 5, pswhst2_int0_k2_attn_idx, 0x29e180, 0x29e18c, 0x29e188, 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_k2_regs[1] = {
+ &pswhst2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswhst2_prty_attn_desc[1] = {
+ "pswhst2_datapath_registers",
+};
+#else
+#define pswhst2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
+ 0, 1, pswhst2_prty0_bb_b0_attn_idx, 0x29e190, 0x29e19c, 0x29e198,
+ 0x29e194
+};
+
+static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
+ &pswhst2_prty0_bb_b0,
+};
+
+static const u16 pswhst2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswhst2_prty0_k2 = {
+ 0, 1, pswhst2_prty0_k2_attn_idx, 0x29e190, 0x29e19c, 0x29e198, 0x29e194
+};
+
+static struct attn_hw_reg *pswhst2_prty_k2_regs[1] = {
+ &pswhst2_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd_int_attn_desc[3] = {
+ "pswrd_address_error",
+ "pswrd_pop_error",
+ "pswrd_pop_pbf_error",
+};
+#else
+#define pswrd_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd_int0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_bb_a0 = {
+ 0, 3, pswrd_int0_bb_a0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_bb_a0_regs[1] = {
+ &pswrd_int0_bb_a0,
+};
+
+static const u16 pswrd_int0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_bb_b0 = {
+ 0, 3, pswrd_int0_bb_b0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
+ &pswrd_int0_bb_b0,
+};
+
+static const u16 pswrd_int0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_k2 = {
+ 0, 3, pswrd_int0_k2_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_k2_regs[1] = {
+ &pswrd_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd_prty_attn_desc[1] = {
+ "pswrd_datapath_registers",
+};
+#else
+#define pswrd_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd_prty0_bb_b0 = {
+ 0, 1, pswrd_prty0_bb_b0_attn_idx, 0x29c190, 0x29c19c, 0x29c198,
+ 0x29c194
+};
+
+static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
+ &pswrd_prty0_bb_b0,
+};
+
+static const u16 pswrd_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd_prty0_k2 = {
+ 0, 1, pswrd_prty0_k2_attn_idx, 0x29c190, 0x29c19c, 0x29c198, 0x29c194
+};
+
+static struct attn_hw_reg *pswrd_prty_k2_regs[1] = {
+ &pswrd_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd2_int_attn_desc[5] = {
+ "pswrd2_address_error",
+ "pswrd2_sr_fifo_error",
+ "pswrd2_blk_fifo_error",
+ "pswrd2_push_error",
+ "pswrd2_push_pbf_error",
+};
+#else
+#define pswrd2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd2_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_bb_a0 = {
+ 0, 5, pswrd2_int0_bb_a0_attn_idx, 0x29d180, 0x29d18c, 0x29d188,
+ 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_bb_a0_regs[1] = {
+ &pswrd2_int0_bb_a0,
+};
+
+static const u16 pswrd2_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_bb_b0 = {
+ 0, 5, pswrd2_int0_bb_b0_attn_idx, 0x29d180, 0x29d18c, 0x29d188,
+ 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
+ &pswrd2_int0_bb_b0,
+};
+
+static const u16 pswrd2_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_k2 = {
+ 0, 5, pswrd2_int0_k2_attn_idx, 0x29d180, 0x29d18c, 0x29d188, 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_k2_regs[1] = {
+ &pswrd2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrd2_prty_attn_desc[36] = {
+ "pswrd2_datapath_registers",
+ "pswrd2_mem017_i_ecc_rf_int",
+ "pswrd2_mem018_i_ecc_rf_int",
+ "pswrd2_mem019_i_ecc_rf_int",
+ "pswrd2_mem020_i_ecc_rf_int",
+ "pswrd2_mem021_i_ecc_rf_int",
+ "pswrd2_mem022_i_ecc_rf_int",
+ "pswrd2_mem023_i_ecc_rf_int",
+ "pswrd2_mem024_i_ecc_rf_int",
+ "pswrd2_mem025_i_ecc_rf_int",
+ "pswrd2_mem015_i_ecc_rf_int",
+ "pswrd2_mem034_i_mem_prty",
+ "pswrd2_mem032_i_mem_prty",
+ "pswrd2_mem028_i_mem_prty",
+ "pswrd2_mem033_i_mem_prty",
+ "pswrd2_mem030_i_mem_prty",
+ "pswrd2_mem029_i_mem_prty",
+ "pswrd2_mem031_i_mem_prty",
+ "pswrd2_mem027_i_mem_prty",
+ "pswrd2_mem026_i_mem_prty",
+ "pswrd2_mem001_i_mem_prty",
+ "pswrd2_mem007_i_mem_prty",
+ "pswrd2_mem008_i_mem_prty",
+ "pswrd2_mem009_i_mem_prty",
+ "pswrd2_mem010_i_mem_prty",
+ "pswrd2_mem011_i_mem_prty",
+ "pswrd2_mem012_i_mem_prty",
+ "pswrd2_mem013_i_mem_prty",
+ "pswrd2_mem014_i_mem_prty",
+ "pswrd2_mem002_i_mem_prty",
+ "pswrd2_mem003_i_mem_prty",
+ "pswrd2_mem004_i_mem_prty",
+ "pswrd2_mem005_i_mem_prty",
+ "pswrd2_mem006_i_mem_prty",
+ "pswrd2_mem016_i_mem_prty",
+ "pswrd2_mem015_i_mem_prty",
+};
+#else
+#define pswrd2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd2_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+};
+
+static struct attn_hw_reg pswrd2_prty1_bb_a0 = {
+ 0, 31, pswrd2_prty1_bb_a0_attn_idx, 0x29d200, 0x29d20c, 0x29d208,
+ 0x29d204
+};
+
+static const u16 pswrd2_prty2_bb_a0_attn_idx[3] = {
+ 33, 34, 35,
+};
+
+static struct attn_hw_reg pswrd2_prty2_bb_a0 = {
+ 1, 3, pswrd2_prty2_bb_a0_attn_idx, 0x29d210, 0x29d21c, 0x29d218,
+ 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_bb_a0_regs[2] = {
+ &pswrd2_prty1_bb_a0, &pswrd2_prty2_bb_a0,
+};
+
+static const u16 pswrd2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
+ 0, 1, pswrd2_prty0_bb_b0_attn_idx, 0x29d190, 0x29d19c, 0x29d198,
+ 0x29d194
+};
+
+static const u16 pswrd2_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
+ 1, 31, pswrd2_prty1_bb_b0_attn_idx, 0x29d200, 0x29d20c, 0x29d208,
+ 0x29d204
+};
+
+static const u16 pswrd2_prty2_bb_b0_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
+ 2, 3, pswrd2_prty2_bb_b0_attn_idx, 0x29d210, 0x29d21c, 0x29d218,
+ 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
+ &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0,
+};
+
+static const u16 pswrd2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrd2_prty0_k2 = {
+ 0, 1, pswrd2_prty0_k2_attn_idx, 0x29d190, 0x29d19c, 0x29d198, 0x29d194
+};
+
+static const u16 pswrd2_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswrd2_prty1_k2 = {
+ 1, 31, pswrd2_prty1_k2_attn_idx, 0x29d200, 0x29d20c, 0x29d208, 0x29d204
+};
+
+static const u16 pswrd2_prty2_k2_attn_idx[3] = {
+ 32, 33, 34,
+};
+
+static struct attn_hw_reg pswrd2_prty2_k2 = {
+ 2, 3, pswrd2_prty2_k2_attn_idx, 0x29d210, 0x29d21c, 0x29d218, 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_k2_regs[3] = {
+ &pswrd2_prty0_k2, &pswrd2_prty1_k2, &pswrd2_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr_int_attn_desc[16] = {
+ "pswwr_address_error",
+ "pswwr_src_fifo_overflow",
+ "pswwr_qm_fifo_overflow",
+ "pswwr_tm_fifo_overflow",
+ "pswwr_usdm_fifo_overflow",
+ "pswwr_usdmdp_fifo_overflow",
+ "pswwr_xsdm_fifo_overflow",
+ "pswwr_tsdm_fifo_overflow",
+ "pswwr_cduwr_fifo_overflow",
+ "pswwr_dbg_fifo_overflow",
+ "pswwr_dmae_fifo_overflow",
+ "pswwr_hc_fifo_overflow",
+ "pswwr_msdm_fifo_overflow",
+ "pswwr_ysdm_fifo_overflow",
+ "pswwr_psdm_fifo_overflow",
+ "pswwr_m2p_fifo_overflow",
+};
+#else
+#define pswwr_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_bb_a0 = {
+ 0, 16, pswwr_int0_bb_a0_attn_idx, 0x29a180, 0x29a18c, 0x29a188,
+ 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_bb_a0_regs[1] = {
+ &pswwr_int0_bb_a0,
+};
+
+static const u16 pswwr_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_bb_b0 = {
+ 0, 16, pswwr_int0_bb_b0_attn_idx, 0x29a180, 0x29a18c, 0x29a188,
+ 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
+ &pswwr_int0_bb_b0,
+};
+
+static const u16 pswwr_int0_k2_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_k2 = {
+ 0, 16, pswwr_int0_k2_attn_idx, 0x29a180, 0x29a18c, 0x29a188, 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_k2_regs[1] = {
+ &pswwr_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr_prty_attn_desc[1] = {
+ "pswwr_datapath_registers",
+};
+#else
+#define pswwr_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr_prty0_bb_b0 = {
+ 0, 1, pswwr_prty0_bb_b0_attn_idx, 0x29a190, 0x29a19c, 0x29a198,
+ 0x29a194
+};
+
+static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
+ &pswwr_prty0_bb_b0,
+};
+
+static const u16 pswwr_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr_prty0_k2 = {
+ 0, 1, pswwr_prty0_k2_attn_idx, 0x29a190, 0x29a19c, 0x29a198, 0x29a194
+};
+
+static struct attn_hw_reg *pswwr_prty_k2_regs[1] = {
+ &pswwr_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr2_int_attn_desc[19] = {
+ "pswwr2_address_error",
+ "pswwr2_pglue_eop_error",
+ "pswwr2_pglue_lsr_error",
+ "pswwr2_tm_underflow",
+ "pswwr2_qm_underflow",
+ "pswwr2_src_underflow",
+ "pswwr2_usdm_underflow",
+ "pswwr2_tsdm_underflow",
+ "pswwr2_xsdm_underflow",
+ "pswwr2_usdmdp_underflow",
+ "pswwr2_cdu_underflow",
+ "pswwr2_dbg_underflow",
+ "pswwr2_dmae_underflow",
+ "pswwr2_hc_underflow",
+ "pswwr2_msdm_underflow",
+ "pswwr2_ysdm_underflow",
+ "pswwr2_psdm_underflow",
+ "pswwr2_m2p_underflow",
+ "pswwr2_pglue_eop_error_in_line",
+};
+#else
+#define pswwr2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr2_int0_bb_a0_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_bb_a0 = {
+ 0, 19, pswwr2_int0_bb_a0_attn_idx, 0x29b180, 0x29b18c, 0x29b188,
+ 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_bb_a0_regs[1] = {
+ &pswwr2_int0_bb_a0,
+};
+
+static const u16 pswwr2_int0_bb_b0_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_bb_b0 = {
+ 0, 19, pswwr2_int0_bb_b0_attn_idx, 0x29b180, 0x29b18c, 0x29b188,
+ 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
+ &pswwr2_int0_bb_b0,
+};
+
+static const u16 pswwr2_int0_k2_attn_idx[19] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_k2 = {
+ 0, 19, pswwr2_int0_k2_attn_idx, 0x29b180, 0x29b18c, 0x29b188, 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_k2_regs[1] = {
+ &pswwr2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswwr2_prty_attn_desc[114] = {
+ "pswwr2_datapath_registers",
+ "pswwr2_mem008_i_ecc_rf_int",
+ "pswwr2_mem001_i_mem_prty",
+ "pswwr2_mem014_i_mem_prty_0",
+ "pswwr2_mem014_i_mem_prty_1",
+ "pswwr2_mem014_i_mem_prty_2",
+ "pswwr2_mem014_i_mem_prty_3",
+ "pswwr2_mem014_i_mem_prty_4",
+ "pswwr2_mem014_i_mem_prty_5",
+ "pswwr2_mem014_i_mem_prty_6",
+ "pswwr2_mem014_i_mem_prty_7",
+ "pswwr2_mem014_i_mem_prty_8",
+ "pswwr2_mem016_i_mem_prty_0",
+ "pswwr2_mem016_i_mem_prty_1",
+ "pswwr2_mem016_i_mem_prty_2",
+ "pswwr2_mem016_i_mem_prty_3",
+ "pswwr2_mem016_i_mem_prty_4",
+ "pswwr2_mem016_i_mem_prty_5",
+ "pswwr2_mem016_i_mem_prty_6",
+ "pswwr2_mem016_i_mem_prty_7",
+ "pswwr2_mem016_i_mem_prty_8",
+ "pswwr2_mem007_i_mem_prty_0",
+ "pswwr2_mem007_i_mem_prty_1",
+ "pswwr2_mem007_i_mem_prty_2",
+ "pswwr2_mem007_i_mem_prty_3",
+ "pswwr2_mem007_i_mem_prty_4",
+ "pswwr2_mem007_i_mem_prty_5",
+ "pswwr2_mem007_i_mem_prty_6",
+ "pswwr2_mem007_i_mem_prty_7",
+ "pswwr2_mem007_i_mem_prty_8",
+ "pswwr2_mem017_i_mem_prty_0",
+ "pswwr2_mem017_i_mem_prty_1",
+ "pswwr2_mem017_i_mem_prty_2",
+ "pswwr2_mem017_i_mem_prty_3",
+ "pswwr2_mem017_i_mem_prty_4",
+ "pswwr2_mem017_i_mem_prty_5",
+ "pswwr2_mem017_i_mem_prty_6",
+ "pswwr2_mem017_i_mem_prty_7",
+ "pswwr2_mem017_i_mem_prty_8",
+ "pswwr2_mem009_i_mem_prty_0",
+ "pswwr2_mem009_i_mem_prty_1",
+ "pswwr2_mem009_i_mem_prty_2",
+ "pswwr2_mem009_i_mem_prty_3",
+ "pswwr2_mem009_i_mem_prty_4",
+ "pswwr2_mem009_i_mem_prty_5",
+ "pswwr2_mem009_i_mem_prty_6",
+ "pswwr2_mem009_i_mem_prty_7",
+ "pswwr2_mem009_i_mem_prty_8",
+ "pswwr2_mem013_i_mem_prty_0",
+ "pswwr2_mem013_i_mem_prty_1",
+ "pswwr2_mem013_i_mem_prty_2",
+ "pswwr2_mem013_i_mem_prty_3",
+ "pswwr2_mem013_i_mem_prty_4",
+ "pswwr2_mem013_i_mem_prty_5",
+ "pswwr2_mem013_i_mem_prty_6",
+ "pswwr2_mem013_i_mem_prty_7",
+ "pswwr2_mem013_i_mem_prty_8",
+ "pswwr2_mem006_i_mem_prty_0",
+ "pswwr2_mem006_i_mem_prty_1",
+ "pswwr2_mem006_i_mem_prty_2",
+ "pswwr2_mem006_i_mem_prty_3",
+ "pswwr2_mem006_i_mem_prty_4",
+ "pswwr2_mem006_i_mem_prty_5",
+ "pswwr2_mem006_i_mem_prty_6",
+ "pswwr2_mem006_i_mem_prty_7",
+ "pswwr2_mem006_i_mem_prty_8",
+ "pswwr2_mem010_i_mem_prty_0",
+ "pswwr2_mem010_i_mem_prty_1",
+ "pswwr2_mem010_i_mem_prty_2",
+ "pswwr2_mem010_i_mem_prty_3",
+ "pswwr2_mem010_i_mem_prty_4",
+ "pswwr2_mem010_i_mem_prty_5",
+ "pswwr2_mem010_i_mem_prty_6",
+ "pswwr2_mem010_i_mem_prty_7",
+ "pswwr2_mem010_i_mem_prty_8",
+ "pswwr2_mem012_i_mem_prty",
+ "pswwr2_mem011_i_mem_prty_0",
+ "pswwr2_mem011_i_mem_prty_1",
+ "pswwr2_mem011_i_mem_prty_2",
+ "pswwr2_mem011_i_mem_prty_3",
+ "pswwr2_mem011_i_mem_prty_4",
+ "pswwr2_mem011_i_mem_prty_5",
+ "pswwr2_mem011_i_mem_prty_6",
+ "pswwr2_mem011_i_mem_prty_7",
+ "pswwr2_mem011_i_mem_prty_8",
+ "pswwr2_mem004_i_mem_prty_0",
+ "pswwr2_mem004_i_mem_prty_1",
+ "pswwr2_mem004_i_mem_prty_2",
+ "pswwr2_mem004_i_mem_prty_3",
+ "pswwr2_mem004_i_mem_prty_4",
+ "pswwr2_mem004_i_mem_prty_5",
+ "pswwr2_mem004_i_mem_prty_6",
+ "pswwr2_mem004_i_mem_prty_7",
+ "pswwr2_mem004_i_mem_prty_8",
+ "pswwr2_mem015_i_mem_prty_0",
+ "pswwr2_mem015_i_mem_prty_1",
+ "pswwr2_mem015_i_mem_prty_2",
+ "pswwr2_mem005_i_mem_prty_0",
+ "pswwr2_mem005_i_mem_prty_1",
+ "pswwr2_mem005_i_mem_prty_2",
+ "pswwr2_mem005_i_mem_prty_3",
+ "pswwr2_mem005_i_mem_prty_4",
+ "pswwr2_mem005_i_mem_prty_5",
+ "pswwr2_mem005_i_mem_prty_6",
+ "pswwr2_mem005_i_mem_prty_7",
+ "pswwr2_mem005_i_mem_prty_8",
+ "pswwr2_mem002_i_mem_prty_0",
+ "pswwr2_mem002_i_mem_prty_1",
+ "pswwr2_mem002_i_mem_prty_2",
+ "pswwr2_mem002_i_mem_prty_3",
+ "pswwr2_mem002_i_mem_prty_4",
+ "pswwr2_mem003_i_mem_prty_0",
+ "pswwr2_mem003_i_mem_prty_1",
+ "pswwr2_mem003_i_mem_prty_2",
+};
+#else
+#define pswwr2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr2_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_bb_a0 = {
+ 0, 31, pswwr2_prty1_bb_a0_attn_idx, 0x29b200, 0x29b20c, 0x29b208,
+ 0x29b204
+};
+
+static const u16 pswwr2_prty2_bb_a0_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_bb_a0 = {
+ 1, 31, pswwr2_prty2_bb_a0_attn_idx, 0x29b210, 0x29b21c, 0x29b218,
+ 0x29b214
+};
+
+static const u16 pswwr2_prty3_bb_a0_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_bb_a0 = {
+ 2, 31, pswwr2_prty3_bb_a0_attn_idx, 0x29b220, 0x29b22c, 0x29b228,
+ 0x29b224
+};
+
+static const u16 pswwr2_prty4_bb_a0_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_bb_a0 = {
+ 3, 20, pswwr2_prty4_bb_a0_attn_idx, 0x29b230, 0x29b23c, 0x29b238,
+ 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_bb_a0_regs[4] = {
+ &pswwr2_prty1_bb_a0, &pswwr2_prty2_bb_a0, &pswwr2_prty3_bb_a0,
+ &pswwr2_prty4_bb_a0,
+};
+
+static const u16 pswwr2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
+ 0, 1, pswwr2_prty0_bb_b0_attn_idx, 0x29b190, 0x29b19c, 0x29b198,
+ 0x29b194
+};
+
+static const u16 pswwr2_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
+ 1, 31, pswwr2_prty1_bb_b0_attn_idx, 0x29b200, 0x29b20c, 0x29b208,
+ 0x29b204
+};
+
+static const u16 pswwr2_prty2_bb_b0_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
+ 2, 31, pswwr2_prty2_bb_b0_attn_idx, 0x29b210, 0x29b21c, 0x29b218,
+ 0x29b214
+};
+
+static const u16 pswwr2_prty3_bb_b0_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
+ 3, 31, pswwr2_prty3_bb_b0_attn_idx, 0x29b220, 0x29b22c, 0x29b228,
+ 0x29b224
+};
+
+static const u16 pswwr2_prty4_bb_b0_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
+ 4, 20, pswwr2_prty4_bb_b0_attn_idx, 0x29b230, 0x29b23c, 0x29b238,
+ 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
+ &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
+ &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0,
+};
+
+static const u16 pswwr2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswwr2_prty0_k2 = {
+ 0, 1, pswwr2_prty0_k2_attn_idx, 0x29b190, 0x29b19c, 0x29b198, 0x29b194
+};
+
+static const u16 pswwr2_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_k2 = {
+ 1, 31, pswwr2_prty1_k2_attn_idx, 0x29b200, 0x29b20c, 0x29b208, 0x29b204
+};
+
+static const u16 pswwr2_prty2_k2_attn_idx[31] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_k2 = {
+ 2, 31, pswwr2_prty2_k2_attn_idx, 0x29b210, 0x29b21c, 0x29b218, 0x29b214
+};
+
+static const u16 pswwr2_prty3_k2_attn_idx[31] = {
+ 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_k2 = {
+ 3, 31, pswwr2_prty3_k2_attn_idx, 0x29b220, 0x29b22c, 0x29b228, 0x29b224
+};
+
+static const u16 pswwr2_prty4_k2_attn_idx[20] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_k2 = {
+ 4, 20, pswwr2_prty4_k2_attn_idx, 0x29b230, 0x29b23c, 0x29b238, 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_k2_regs[5] = {
+ &pswwr2_prty0_k2, &pswwr2_prty1_k2, &pswwr2_prty2_k2, &pswwr2_prty3_k2,
+ &pswwr2_prty4_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq_int_attn_desc[21] = {
+ "pswrq_address_error",
+ "pswrq_pbf_fifo_overflow",
+ "pswrq_src_fifo_overflow",
+ "pswrq_qm_fifo_overflow",
+ "pswrq_tm_fifo_overflow",
+ "pswrq_usdm_fifo_overflow",
+ "pswrq_m2p_fifo_overflow",
+ "pswrq_xsdm_fifo_overflow",
+ "pswrq_tsdm_fifo_overflow",
+ "pswrq_ptu_fifo_overflow",
+ "pswrq_cduwr_fifo_overflow",
+ "pswrq_cdurd_fifo_overflow",
+ "pswrq_dmae_fifo_overflow",
+ "pswrq_hc_fifo_overflow",
+ "pswrq_dbg_fifo_overflow",
+ "pswrq_msdm_fifo_overflow",
+ "pswrq_ysdm_fifo_overflow",
+ "pswrq_psdm_fifo_overflow",
+ "pswrq_prm_fifo_overflow",
+ "pswrq_muld_fifo_overflow",
+ "pswrq_xyld_fifo_overflow",
+};
+#else
+#define pswrq_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq_int0_bb_a0_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pswrq_int0_bb_a0 = {
+ 0, 21, pswrq_int0_bb_a0_attn_idx, 0x280180, 0x28018c, 0x280188,
+ 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_bb_a0_regs[1] = {
+ &pswrq_int0_bb_a0,
+};
+
+static const u16 pswrq_int0_bb_b0_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pswrq_int0_bb_b0 = {
+ 0, 21, pswrq_int0_bb_b0_attn_idx, 0x280180, 0x28018c, 0x280188,
+ 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
+ &pswrq_int0_bb_b0,
+};
+
+static const u16 pswrq_int0_k2_attn_idx[21] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pswrq_int0_k2 = {
+ 0, 21, pswrq_int0_k2_attn_idx, 0x280180, 0x28018c, 0x280188, 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_k2_regs[1] = {
+ &pswrq_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq_prty_attn_desc[1] = {
+ "pswrq_pxp_busip_parity",
+};
+#else
+#define pswrq_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrq_prty0_bb_b0 = {
+ 0, 1, pswrq_prty0_bb_b0_attn_idx, 0x280190, 0x28019c, 0x280198,
+ 0x280194
+};
+
+static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
+ &pswrq_prty0_bb_b0,
+};
+
+static const u16 pswrq_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pswrq_prty0_k2 = {
+ 0, 1, pswrq_prty0_k2_attn_idx, 0x280190, 0x28019c, 0x280198, 0x280194
+};
+
+static struct attn_hw_reg *pswrq_prty_k2_regs[1] = {
+ &pswrq_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq2_int_attn_desc[15] = {
+ "pswrq2_address_error",
+ "pswrq2_l2p_fifo_overflow",
+ "pswrq2_wdfifo_overflow",
+ "pswrq2_phyaddr_fifo_of",
+ "pswrq2_l2p_violation_1",
+ "pswrq2_l2p_violation_2",
+ "pswrq2_free_list_empty",
+ "pswrq2_elt_addr",
+ "pswrq2_l2p_vf_err",
+ "pswrq2_core_wdone_overflow",
+ "pswrq2_treq_fifo_underflow",
+ "pswrq2_treq_fifo_overflow",
+ "pswrq2_icpl_fifo_underflow",
+ "pswrq2_icpl_fifo_overflow",
+ "pswrq2_back2back_atc_response",
+};
+#else
+#define pswrq2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq2_int0_bb_a0_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_bb_a0 = {
+ 0, 15, pswrq2_int0_bb_a0_attn_idx, 0x240180, 0x24018c, 0x240188,
+ 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_bb_a0_regs[1] = {
+ &pswrq2_int0_bb_a0,
+};
+
+static const u16 pswrq2_int0_bb_b0_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_bb_b0 = {
+ 0, 15, pswrq2_int0_bb_b0_attn_idx, 0x240180, 0x24018c, 0x240188,
+ 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
+ &pswrq2_int0_bb_b0,
+};
+
+static const u16 pswrq2_int0_k2_attn_idx[15] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_k2 = {
+ 0, 15, pswrq2_int0_k2_attn_idx, 0x240180, 0x24018c, 0x240188, 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_k2_regs[1] = {
+ &pswrq2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq2_prty_attn_desc[11] = {
+ "pswrq2_mem004_i_ecc_rf_int",
+ "pswrq2_mem005_i_ecc_rf_int",
+ "pswrq2_mem001_i_ecc_rf_int",
+ "pswrq2_mem006_i_mem_prty",
+ "pswrq2_mem008_i_mem_prty",
+ "pswrq2_mem009_i_mem_prty",
+ "pswrq2_mem003_i_mem_prty",
+ "pswrq2_mem002_i_mem_prty",
+ "pswrq2_mem010_i_mem_prty",
+ "pswrq2_mem007_i_mem_prty",
+ "pswrq2_mem005_i_mem_prty",
+};
+#else
+#define pswrq2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq2_prty1_bb_a0_attn_idx[9] = {
+ 0, 2, 3, 4, 5, 6, 7, 9, 10,
+};
+
+static struct attn_hw_reg pswrq2_prty1_bb_a0 = {
+ 0, 9, pswrq2_prty1_bb_a0_attn_idx, 0x240200, 0x24020c, 0x240208,
+ 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_bb_a0_regs[1] = {
+ &pswrq2_prty1_bb_a0,
+};
+
+static const u16 pswrq2_prty1_bb_b0_attn_idx[9] = {
+ 0, 2, 3, 4, 5, 6, 7, 9, 10,
+};
+
+static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
+ 0, 9, pswrq2_prty1_bb_b0_attn_idx, 0x240200, 0x24020c, 0x240208,
+ 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
+ &pswrq2_prty1_bb_b0,
+};
+
+static const u16 pswrq2_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg pswrq2_prty1_k2 = {
+ 0, 10, pswrq2_prty1_k2_attn_idx, 0x240200, 0x24020c, 0x240208, 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_k2_regs[1] = {
+ &pswrq2_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglcs_int_attn_desc[2] = {
+ "pglcs_address_error",
+ "pglcs_rasdp_error",
+};
+#else
+#define pglcs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglcs_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglcs_int0_bb_a0 = {
+ 0, 1, pglcs_int0_bb_a0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_bb_a0_regs[1] = {
+ &pglcs_int0_bb_a0,
+};
+
+static const u16 pglcs_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pglcs_int0_bb_b0 = {
+ 0, 1, pglcs_int0_bb_b0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
+ &pglcs_int0_bb_b0,
+};
+
+static const u16 pglcs_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg pglcs_int0_k2 = {
+ 0, 2, pglcs_int0_k2_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_k2_regs[1] = {
+ &pglcs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dmae_int_attn_desc[2] = {
+ "dmae_address_error",
+ "dmae_pci_rd_buf_err",
+};
+#else
+#define dmae_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dmae_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_bb_a0 = {
+ 0, 2, dmae_int0_bb_a0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_bb_a0_regs[1] = {
+ &dmae_int0_bb_a0,
+};
+
+static const u16 dmae_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_bb_b0 = {
+ 0, 2, dmae_int0_bb_b0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
+ &dmae_int0_bb_b0,
+};
+
+static const u16 dmae_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_k2 = {
+ 0, 2, dmae_int0_k2_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_k2_regs[1] = {
+ &dmae_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dmae_prty_attn_desc[3] = {
+ "dmae_mem002_i_mem_prty",
+ "dmae_mem001_i_mem_prty",
+ "dmae_mem003_i_mem_prty",
+};
+#else
+#define dmae_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dmae_prty1_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_bb_a0 = {
+ 0, 3, dmae_prty1_bb_a0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_bb_a0_regs[1] = {
+ &dmae_prty1_bb_a0,
+};
+
+static const u16 dmae_prty1_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_bb_b0 = {
+ 0, 3, dmae_prty1_bb_b0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
+ &dmae_prty1_bb_b0,
+};
+
+static const u16 dmae_prty1_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_k2 = {
+ 0, 3, dmae_prty1_k2_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_k2_regs[1] = {
+ &dmae_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ptu_int_attn_desc[8] = {
+ "ptu_address_error",
+ "ptu_atc_tcpl_to_not_pend",
+ "ptu_atc_gpa_multiple_hits",
+ "ptu_atc_rcpl_to_empty_cnt",
+ "ptu_atc_tcpl_error",
+ "ptu_atc_inv_halt",
+ "ptu_atc_reuse_transpend",
+ "ptu_atc_ireq_less_than_stu",
+};
+#else
+#define ptu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ptu_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_bb_a0 = {
+ 0, 8, ptu_int0_bb_a0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_bb_a0_regs[1] = {
+ &ptu_int0_bb_a0,
+};
+
+static const u16 ptu_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_bb_b0 = {
+ 0, 8, ptu_int0_bb_b0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
+ &ptu_int0_bb_b0,
+};
+
+static const u16 ptu_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_k2 = {
+ 0, 8, ptu_int0_k2_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_k2_regs[1] = {
+ &ptu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ptu_prty_attn_desc[18] = {
+ "ptu_mem017_i_ecc_rf_int",
+ "ptu_mem018_i_mem_prty",
+ "ptu_mem006_i_mem_prty",
+ "ptu_mem001_i_mem_prty",
+ "ptu_mem002_i_mem_prty",
+ "ptu_mem003_i_mem_prty",
+ "ptu_mem004_i_mem_prty",
+ "ptu_mem005_i_mem_prty",
+ "ptu_mem009_i_mem_prty",
+ "ptu_mem010_i_mem_prty",
+ "ptu_mem016_i_mem_prty",
+ "ptu_mem007_i_mem_prty",
+ "ptu_mem015_i_mem_prty",
+ "ptu_mem013_i_mem_prty",
+ "ptu_mem012_i_mem_prty",
+ "ptu_mem014_i_mem_prty",
+ "ptu_mem011_i_mem_prty",
+ "ptu_mem008_i_mem_prty",
+};
+#else
+#define ptu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ptu_prty1_bb_a0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_bb_a0 = {
+ 0, 18, ptu_prty1_bb_a0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_bb_a0_regs[1] = {
+ &ptu_prty1_bb_a0,
+};
+
+static const u16 ptu_prty1_bb_b0_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_bb_b0 = {
+ 0, 18, ptu_prty1_bb_b0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
+ &ptu_prty1_bb_b0,
+};
+
+static const u16 ptu_prty1_k2_attn_idx[18] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_k2 = {
+ 0, 18, ptu_prty1_k2_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_k2_regs[1] = {
+ &ptu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcm_int_attn_desc[41] = {
+ "tcm_address_error",
+ "tcm_is_storm_ovfl_err",
+ "tcm_is_storm_under_err",
+ "tcm_is_tsdm_ovfl_err",
+ "tcm_is_tsdm_under_err",
+ "tcm_is_msem_ovfl_err",
+ "tcm_is_msem_under_err",
+ "tcm_is_ysem_ovfl_err",
+ "tcm_is_ysem_under_err",
+ "tcm_is_dorq_ovfl_err",
+ "tcm_is_dorq_under_err",
+ "tcm_is_pbf_ovfl_err",
+ "tcm_is_pbf_under_err",
+ "tcm_is_prs_ovfl_err",
+ "tcm_is_prs_under_err",
+ "tcm_is_tm_ovfl_err",
+ "tcm_is_tm_under_err",
+ "tcm_is_qm_p_ovfl_err",
+ "tcm_is_qm_p_under_err",
+ "tcm_is_qm_s_ovfl_err",
+ "tcm_is_qm_s_under_err",
+ "tcm_is_grc_ovfl_err0",
+ "tcm_is_grc_under_err0",
+ "tcm_is_grc_ovfl_err1",
+ "tcm_is_grc_under_err1",
+ "tcm_is_grc_ovfl_err2",
+ "tcm_is_grc_under_err2",
+ "tcm_is_grc_ovfl_err3",
+ "tcm_is_grc_under_err3",
+ "tcm_in_prcs_tbl_ovfl",
+ "tcm_agg_con_data_buf_ovfl",
+ "tcm_agg_con_cmd_buf_ovfl",
+ "tcm_sm_con_data_buf_ovfl",
+ "tcm_sm_con_cmd_buf_ovfl",
+ "tcm_agg_task_data_buf_ovfl",
+ "tcm_agg_task_cmd_buf_ovfl",
+ "tcm_sm_task_data_buf_ovfl",
+ "tcm_sm_task_cmd_buf_ovfl",
+ "tcm_fi_desc_input_violate",
+ "tcm_se_desc_input_violate",
+ "tcm_qmreg_more4",
+};
+#else
+#define tcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcm_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_bb_a0 = {
+ 0, 8, tcm_int0_bb_a0_attn_idx, 0x1180180, 0x118018c, 0x1180188,
+ 0x1180184
+};
+
+static const u16 tcm_int1_bb_a0_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_bb_a0 = {
+ 1, 32, tcm_int1_bb_a0_attn_idx, 0x1180190, 0x118019c, 0x1180198,
+ 0x1180194
+};
+
+static const u16 tcm_int2_bb_a0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_bb_a0 = {
+ 2, 1, tcm_int2_bb_a0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8,
+ 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_bb_a0_regs[3] = {
+ &tcm_int0_bb_a0, &tcm_int1_bb_a0, &tcm_int2_bb_a0,
+};
+
+static const u16 tcm_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_bb_b0 = {
+ 0, 8, tcm_int0_bb_b0_attn_idx, 0x1180180, 0x118018c, 0x1180188,
+ 0x1180184
+};
+
+static const u16 tcm_int1_bb_b0_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_bb_b0 = {
+ 1, 32, tcm_int1_bb_b0_attn_idx, 0x1180190, 0x118019c, 0x1180198,
+ 0x1180194
+};
+
+static const u16 tcm_int2_bb_b0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_bb_b0 = {
+ 2, 1, tcm_int2_bb_b0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8,
+ 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
+ &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0,
+};
+
+static const u16 tcm_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_k2 = {
+ 0, 8, tcm_int0_k2_attn_idx, 0x1180180, 0x118018c, 0x1180188, 0x1180184
+};
+
+static const u16 tcm_int1_k2_attn_idx[32] = {
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_k2 = {
+ 1, 32, tcm_int1_k2_attn_idx, 0x1180190, 0x118019c, 0x1180198, 0x1180194
+};
+
+static const u16 tcm_int2_k2_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg tcm_int2_k2 = {
+ 2, 1, tcm_int2_k2_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_k2_regs[3] = {
+ &tcm_int0_k2, &tcm_int1_k2, &tcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcm_prty_attn_desc[51] = {
+ "tcm_mem026_i_ecc_rf_int",
+ "tcm_mem003_i_ecc_0_rf_int",
+ "tcm_mem003_i_ecc_1_rf_int",
+ "tcm_mem022_i_ecc_0_rf_int",
+ "tcm_mem022_i_ecc_1_rf_int",
+ "tcm_mem005_i_ecc_0_rf_int",
+ "tcm_mem005_i_ecc_1_rf_int",
+ "tcm_mem024_i_ecc_0_rf_int",
+ "tcm_mem024_i_ecc_1_rf_int",
+ "tcm_mem018_i_mem_prty",
+ "tcm_mem019_i_mem_prty",
+ "tcm_mem015_i_mem_prty",
+ "tcm_mem016_i_mem_prty",
+ "tcm_mem017_i_mem_prty",
+ "tcm_mem010_i_mem_prty",
+ "tcm_mem020_i_mem_prty",
+ "tcm_mem011_i_mem_prty",
+ "tcm_mem012_i_mem_prty",
+ "tcm_mem013_i_mem_prty",
+ "tcm_mem014_i_mem_prty",
+ "tcm_mem029_i_mem_prty",
+ "tcm_mem028_i_mem_prty",
+ "tcm_mem027_i_mem_prty",
+ "tcm_mem004_i_mem_prty",
+ "tcm_mem023_i_mem_prty",
+ "tcm_mem006_i_mem_prty",
+ "tcm_mem025_i_mem_prty",
+ "tcm_mem021_i_mem_prty",
+ "tcm_mem007_i_mem_prty_0",
+ "tcm_mem007_i_mem_prty_1",
+ "tcm_mem008_i_mem_prty",
+ "tcm_mem025_i_ecc_rf_int",
+ "tcm_mem021_i_ecc_0_rf_int",
+ "tcm_mem021_i_ecc_1_rf_int",
+ "tcm_mem023_i_ecc_0_rf_int",
+ "tcm_mem023_i_ecc_1_rf_int",
+ "tcm_mem026_i_mem_prty",
+ "tcm_mem022_i_mem_prty",
+ "tcm_mem024_i_mem_prty",
+ "tcm_mem009_i_mem_prty",
+ "tcm_mem024_i_ecc_rf_int",
+ "tcm_mem001_i_ecc_0_rf_int",
+ "tcm_mem001_i_ecc_1_rf_int",
+ "tcm_mem019_i_ecc_0_rf_int",
+ "tcm_mem019_i_ecc_1_rf_int",
+ "tcm_mem022_i_ecc_rf_int",
+ "tcm_mem002_i_mem_prty",
+ "tcm_mem005_i_mem_prty_0",
+ "tcm_mem005_i_mem_prty_1",
+ "tcm_mem001_i_mem_prty",
+ "tcm_mem007_i_mem_prty",
+};
+#else
+#define tcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 30, 32,
+ 33, 36, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg tcm_prty1_bb_a0 = {
+ 0, 31, tcm_prty1_bb_a0_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_bb_a0_attn_idx[3] = {
+ 50, 21, 20,
+};
+
+static struct attn_hw_reg tcm_prty2_bb_a0 = {
+ 1, 3, tcm_prty2_bb_a0_attn_idx, 0x1180210, 0x118021c, 0x1180218,
+ 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_bb_a0_regs[2] = {
+ &tcm_prty1_bb_a0, &tcm_prty2_bb_a0,
+};
+
+static const u16 tcm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25,
+ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_prty1_bb_b0 = {
+ 0, 31, tcm_prty1_bb_b0_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_bb_b0_attn_idx[2] = {
+ 49, 46,
+};
+
+static struct attn_hw_reg tcm_prty2_bb_b0 = {
+ 1, 2, tcm_prty2_bb_b0_attn_idx, 0x1180210, 0x118021c, 0x1180218,
+ 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
+ &tcm_prty1_bb_b0, &tcm_prty2_bb_b0,
+};
+
+static const u16 tcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg tcm_prty1_k2 = {
+ 0, 31, tcm_prty1_k2_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+ 0x1180204
+};
+
+static const u16 tcm_prty2_k2_attn_idx[3] = {
+ 39, 49, 46,
+};
+
+static struct attn_hw_reg tcm_prty2_k2 = {
+ 1, 3, tcm_prty2_k2_attn_idx, 0x1180210, 0x118021c, 0x1180218, 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_k2_regs[2] = {
+ &tcm_prty1_k2, &tcm_prty2_k2,
+};
+
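+/* A minimal sketch of how one of the per-chip register lists above could be
+ * walked; guarded out so it never affects compilation. The attn_hw_reg field
+ * names and the ecore_rd()/DP_NOTICE() helpers are assumptions borrowed from
+ * the surrounding ecore code, not definitions made in this file.
+ */
+#if 0	/* illustration only */
+static void attn_dump_block(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+			    struct attn_hw_reg **regs, int num_regs,
+			    const char **desc)
+{
+	int i, bit;
+
+	for (i = 0; i < num_regs; i++) {
+		/* Latched attention status bits for this register */
+		u32 sts = ecore_rd(p_hwfn, p_ptt, regs[i]->sts_addr);
+
+		for (bit = 0; bit < regs[i]->num_of_bits; bit++) {
+			if (!(sts & (1U << bit)))
+				continue;
+			/* attn_idx[] maps a register bit to its entry in the
+			 * block's name table, which is OSAL_NULL when built
+			 * without ATTN_DESC.
+			 */
+			DP_NOTICE(p_hwfn, false, "attention: %s\n",
+				  desc != OSAL_NULL ?
+				  desc[regs[i]->attn_idx[bit]] : "unknown");
+		}
+	}
+}
+/* e.g.: attn_dump_block(p_hwfn, p_ptt, tcm_int_k2_regs, 3,
+ *			 tcm_int_attn_desc);
+ */
+#endif
+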
+#ifdef ATTN_DESC
+static const char *mcm_int_attn_desc[41] = {
+ "mcm_address_error",
+ "mcm_is_storm_ovfl_err",
+ "mcm_is_storm_under_err",
+ "mcm_is_msdm_ovfl_err",
+ "mcm_is_msdm_under_err",
+ "mcm_is_ysdm_ovfl_err",
+ "mcm_is_ysdm_under_err",
+ "mcm_is_usdm_ovfl_err",
+ "mcm_is_usdm_under_err",
+ "mcm_is_tmld_ovfl_err",
+ "mcm_is_tmld_under_err",
+ "mcm_is_usem_ovfl_err",
+ "mcm_is_usem_under_err",
+ "mcm_is_ysem_ovfl_err",
+ "mcm_is_ysem_under_err",
+ "mcm_is_pbf_ovfl_err",
+ "mcm_is_pbf_under_err",
+ "mcm_is_qm_p_ovfl_err",
+ "mcm_is_qm_p_under_err",
+ "mcm_is_qm_s_ovfl_err",
+ "mcm_is_qm_s_under_err",
+ "mcm_is_grc_ovfl_err0",
+ "mcm_is_grc_under_err0",
+ "mcm_is_grc_ovfl_err1",
+ "mcm_is_grc_under_err1",
+ "mcm_is_grc_ovfl_err2",
+ "mcm_is_grc_under_err2",
+ "mcm_is_grc_ovfl_err3",
+ "mcm_is_grc_under_err3",
+ "mcm_in_prcs_tbl_ovfl",
+ "mcm_agg_con_data_buf_ovfl",
+ "mcm_agg_con_cmd_buf_ovfl",
+ "mcm_sm_con_data_buf_ovfl",
+ "mcm_sm_con_cmd_buf_ovfl",
+ "mcm_agg_task_data_buf_ovfl",
+ "mcm_agg_task_cmd_buf_ovfl",
+ "mcm_sm_task_data_buf_ovfl",
+ "mcm_sm_task_cmd_buf_ovfl",
+ "mcm_fi_desc_input_violate",
+ "mcm_se_desc_input_violate",
+ "mcm_qmreg_more4",
+};
+#else
+#define mcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcm_int0_bb_a0_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_bb_a0 = {
+ 0, 14, mcm_int0_bb_a0_attn_idx, 0x1200180, 0x120018c, 0x1200188,
+ 0x1200184
+};
+
+static const u16 mcm_int1_bb_a0_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_bb_a0 = {
+ 1, 26, mcm_int1_bb_a0_attn_idx, 0x1200190, 0x120019c, 0x1200198,
+ 0x1200194
+};
+
+static const u16 mcm_int2_bb_a0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_bb_a0 = {
+ 2, 1, mcm_int2_bb_a0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8,
+ 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_bb_a0_regs[3] = {
+ &mcm_int0_bb_a0, &mcm_int1_bb_a0, &mcm_int2_bb_a0,
+};
+
+static const u16 mcm_int0_bb_b0_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_bb_b0 = {
+ 0, 14, mcm_int0_bb_b0_attn_idx, 0x1200180, 0x120018c, 0x1200188,
+ 0x1200184
+};
+
+static const u16 mcm_int1_bb_b0_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_bb_b0 = {
+ 1, 26, mcm_int1_bb_b0_attn_idx, 0x1200190, 0x120019c, 0x1200198,
+ 0x1200194
+};
+
+static const u16 mcm_int2_bb_b0_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_bb_b0 = {
+ 2, 1, mcm_int2_bb_b0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8,
+ 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
+ &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0,
+};
+
+static const u16 mcm_int0_k2_attn_idx[14] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_k2 = {
+ 0, 14, mcm_int0_k2_attn_idx, 0x1200180, 0x120018c, 0x1200188, 0x1200184
+};
+
+static const u16 mcm_int1_k2_attn_idx[26] = {
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_k2 = {
+ 1, 26, mcm_int1_k2_attn_idx, 0x1200190, 0x120019c, 0x1200198, 0x1200194
+};
+
+static const u16 mcm_int2_k2_attn_idx[1] = {
+ 40,
+};
+
+static struct attn_hw_reg mcm_int2_k2 = {
+ 2, 1, mcm_int2_k2_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_k2_regs[3] = {
+ &mcm_int0_k2, &mcm_int1_k2, &mcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *mcm_prty_attn_desc[46] = {
+ "mcm_mem028_i_ecc_rf_int",
+ "mcm_mem003_i_ecc_rf_int",
+ "mcm_mem023_i_ecc_0_rf_int",
+ "mcm_mem023_i_ecc_1_rf_int",
+ "mcm_mem005_i_ecc_0_rf_int",
+ "mcm_mem005_i_ecc_1_rf_int",
+ "mcm_mem025_i_ecc_0_rf_int",
+ "mcm_mem025_i_ecc_1_rf_int",
+ "mcm_mem026_i_ecc_rf_int",
+ "mcm_mem017_i_mem_prty",
+ "mcm_mem019_i_mem_prty",
+ "mcm_mem016_i_mem_prty",
+ "mcm_mem015_i_mem_prty",
+ "mcm_mem020_i_mem_prty",
+ "mcm_mem021_i_mem_prty",
+ "mcm_mem018_i_mem_prty",
+ "mcm_mem011_i_mem_prty",
+ "mcm_mem012_i_mem_prty",
+ "mcm_mem013_i_mem_prty",
+ "mcm_mem014_i_mem_prty",
+ "mcm_mem031_i_mem_prty",
+ "mcm_mem030_i_mem_prty",
+ "mcm_mem029_i_mem_prty",
+ "mcm_mem004_i_mem_prty",
+ "mcm_mem024_i_mem_prty",
+ "mcm_mem006_i_mem_prty",
+ "mcm_mem027_i_mem_prty",
+ "mcm_mem022_i_mem_prty",
+ "mcm_mem007_i_mem_prty_0",
+ "mcm_mem007_i_mem_prty_1",
+ "mcm_mem008_i_mem_prty",
+ "mcm_mem001_i_ecc_rf_int",
+ "mcm_mem021_i_ecc_0_rf_int",
+ "mcm_mem021_i_ecc_1_rf_int",
+ "mcm_mem003_i_ecc_0_rf_int",
+ "mcm_mem003_i_ecc_1_rf_int",
+ "mcm_mem024_i_ecc_rf_int",
+ "mcm_mem009_i_mem_prty",
+ "mcm_mem010_i_mem_prty",
+ "mcm_mem028_i_mem_prty",
+ "mcm_mem002_i_mem_prty",
+ "mcm_mem025_i_mem_prty",
+ "mcm_mem005_i_mem_prty_0",
+ "mcm_mem005_i_mem_prty_1",
+ "mcm_mem001_i_mem_prty",
+ "mcm_mem007_i_mem_prty",
+};
+#else
+#define mcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcm_prty1_bb_a0_attn_idx[31] = {
+ 2, 3, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 22, 23, 25, 26, 27, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg mcm_prty1_bb_a0 = {
+ 0, 31, mcm_prty1_bb_a0_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_bb_a0_attn_idx[4] = {
+ 45, 30, 21, 20,
+};
+
+static struct attn_hw_reg mcm_prty2_bb_a0 = {
+ 1, 4, mcm_prty2_bb_a0_attn_idx, 0x1200210, 0x120021c, 0x1200218,
+ 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_bb_a0_regs[2] = {
+ &mcm_prty1_bb_a0, &mcm_prty2_bb_a0,
+};
+
+static const u16 mcm_prty1_bb_b0_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg mcm_prty1_bb_b0 = {
+ 0, 31, mcm_prty1_bb_b0_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_bb_b0_attn_idx[4] = {
+ 37, 38, 44, 40,
+};
+
+static struct attn_hw_reg mcm_prty2_bb_b0 = {
+ 1, 4, mcm_prty2_bb_b0_attn_idx, 0x1200210, 0x120021c, 0x1200218,
+ 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
+ &mcm_prty1_bb_b0, &mcm_prty2_bb_b0,
+};
+
+static const u16 mcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg mcm_prty1_k2 = {
+ 0, 31, mcm_prty1_k2_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+ 0x1200204
+};
+
+static const u16 mcm_prty2_k2_attn_idx[4] = {
+ 37, 38, 44, 40,
+};
+
+static struct attn_hw_reg mcm_prty2_k2 = {
+ 1, 4, mcm_prty2_k2_attn_idx, 0x1200210, 0x120021c, 0x1200218, 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_k2_regs[2] = {
+ &mcm_prty1_k2, &mcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ucm_int_attn_desc[47] = {
+ "ucm_address_error",
+ "ucm_is_storm_ovfl_err",
+ "ucm_is_storm_under_err",
+ "ucm_is_xsdm_ovfl_err",
+ "ucm_is_xsdm_under_err",
+ "ucm_is_ysdm_ovfl_err",
+ "ucm_is_ysdm_under_err",
+ "ucm_is_usdm_ovfl_err",
+ "ucm_is_usdm_under_err",
+ "ucm_is_rdif_ovfl_err",
+ "ucm_is_rdif_under_err",
+ "ucm_is_tdif_ovfl_err",
+ "ucm_is_tdif_under_err",
+ "ucm_is_muld_ovfl_err",
+ "ucm_is_muld_under_err",
+ "ucm_is_yuld_ovfl_err",
+ "ucm_is_yuld_under_err",
+ "ucm_is_dorq_ovfl_err",
+ "ucm_is_dorq_under_err",
+ "ucm_is_pbf_ovfl_err",
+ "ucm_is_pbf_under_err",
+ "ucm_is_tm_ovfl_err",
+ "ucm_is_tm_under_err",
+ "ucm_is_qm_p_ovfl_err",
+ "ucm_is_qm_p_under_err",
+ "ucm_is_qm_s_ovfl_err",
+ "ucm_is_qm_s_under_err",
+ "ucm_is_grc_ovfl_err0",
+ "ucm_is_grc_under_err0",
+ "ucm_is_grc_ovfl_err1",
+ "ucm_is_grc_under_err1",
+ "ucm_is_grc_ovfl_err2",
+ "ucm_is_grc_under_err2",
+ "ucm_is_grc_ovfl_err3",
+ "ucm_is_grc_under_err3",
+ "ucm_in_prcs_tbl_ovfl",
+ "ucm_agg_con_data_buf_ovfl",
+ "ucm_agg_con_cmd_buf_ovfl",
+ "ucm_sm_con_data_buf_ovfl",
+ "ucm_sm_con_cmd_buf_ovfl",
+ "ucm_agg_task_data_buf_ovfl",
+ "ucm_agg_task_cmd_buf_ovfl",
+ "ucm_sm_task_data_buf_ovfl",
+ "ucm_sm_task_cmd_buf_ovfl",
+ "ucm_fi_desc_input_violate",
+ "ucm_se_desc_input_violate",
+ "ucm_qmreg_more4",
+};
+#else
+#define ucm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ucm_int0_bb_a0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_bb_a0 = {
+ 0, 17, ucm_int0_bb_a0_attn_idx, 0x1280180, 0x128018c, 0x1280188,
+ 0x1280184
+};
+
+static const u16 ucm_int1_bb_a0_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_bb_a0 = {
+ 1, 29, ucm_int1_bb_a0_attn_idx, 0x1280190, 0x128019c, 0x1280198,
+ 0x1280194
+};
+
+static const u16 ucm_int2_bb_a0_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_bb_a0 = {
+ 2, 1, ucm_int2_bb_a0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8,
+ 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_bb_a0_regs[3] = {
+ &ucm_int0_bb_a0, &ucm_int1_bb_a0, &ucm_int2_bb_a0,
+};
+
+static const u16 ucm_int0_bb_b0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_bb_b0 = {
+ 0, 17, ucm_int0_bb_b0_attn_idx, 0x1280180, 0x128018c, 0x1280188,
+ 0x1280184
+};
+
+static const u16 ucm_int1_bb_b0_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_bb_b0 = {
+ 1, 29, ucm_int1_bb_b0_attn_idx, 0x1280190, 0x128019c, 0x1280198,
+ 0x1280194
+};
+
+static const u16 ucm_int2_bb_b0_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_bb_b0 = {
+ 2, 1, ucm_int2_bb_b0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8,
+ 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
+ &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0,
+};
+
+static const u16 ucm_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_k2 = {
+ 0, 17, ucm_int0_k2_attn_idx, 0x1280180, 0x128018c, 0x1280188, 0x1280184
+};
+
+static const u16 ucm_int1_k2_attn_idx[29] = {
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_k2 = {
+ 1, 29, ucm_int1_k2_attn_idx, 0x1280190, 0x128019c, 0x1280198, 0x1280194
+};
+
+static const u16 ucm_int2_k2_attn_idx[1] = {
+ 46,
+};
+
+static struct attn_hw_reg ucm_int2_k2 = {
+ 2, 1, ucm_int2_k2_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_k2_regs[3] = {
+ &ucm_int0_k2, &ucm_int1_k2, &ucm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ucm_prty_attn_desc[54] = {
+ "ucm_mem030_i_ecc_rf_int",
+ "ucm_mem005_i_ecc_0_rf_int",
+ "ucm_mem005_i_ecc_1_rf_int",
+ "ucm_mem024_i_ecc_0_rf_int",
+ "ucm_mem024_i_ecc_1_rf_int",
+ "ucm_mem025_i_ecc_rf_int",
+ "ucm_mem007_i_ecc_0_rf_int",
+ "ucm_mem007_i_ecc_1_rf_int",
+ "ucm_mem008_i_ecc_rf_int",
+ "ucm_mem027_i_ecc_0_rf_int",
+ "ucm_mem027_i_ecc_1_rf_int",
+ "ucm_mem028_i_ecc_rf_int",
+ "ucm_mem020_i_mem_prty",
+ "ucm_mem021_i_mem_prty",
+ "ucm_mem019_i_mem_prty",
+ "ucm_mem013_i_mem_prty",
+ "ucm_mem018_i_mem_prty",
+ "ucm_mem022_i_mem_prty",
+ "ucm_mem014_i_mem_prty",
+ "ucm_mem015_i_mem_prty",
+ "ucm_mem016_i_mem_prty",
+ "ucm_mem017_i_mem_prty",
+ "ucm_mem033_i_mem_prty",
+ "ucm_mem032_i_mem_prty",
+ "ucm_mem031_i_mem_prty",
+ "ucm_mem006_i_mem_prty",
+ "ucm_mem026_i_mem_prty",
+ "ucm_mem009_i_mem_prty",
+ "ucm_mem029_i_mem_prty",
+ "ucm_mem023_i_mem_prty",
+ "ucm_mem010_i_mem_prty_0",
+ "ucm_mem003_i_ecc_0_rf_int",
+ "ucm_mem003_i_ecc_1_rf_int",
+ "ucm_mem022_i_ecc_0_rf_int",
+ "ucm_mem022_i_ecc_1_rf_int",
+ "ucm_mem023_i_ecc_rf_int",
+ "ucm_mem006_i_ecc_rf_int",
+ "ucm_mem025_i_ecc_0_rf_int",
+ "ucm_mem025_i_ecc_1_rf_int",
+ "ucm_mem026_i_ecc_rf_int",
+ "ucm_mem011_i_mem_prty",
+ "ucm_mem012_i_mem_prty",
+ "ucm_mem030_i_mem_prty",
+ "ucm_mem004_i_mem_prty",
+ "ucm_mem024_i_mem_prty",
+ "ucm_mem007_i_mem_prty",
+ "ucm_mem027_i_mem_prty",
+ "ucm_mem008_i_mem_prty_0",
+ "ucm_mem010_i_mem_prty_1",
+ "ucm_mem003_i_mem_prty",
+ "ucm_mem001_i_mem_prty",
+ "ucm_mem002_i_mem_prty",
+ "ucm_mem008_i_mem_prty_1",
+ "ucm_mem010_i_mem_prty",
+};
+#else
+#define ucm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ucm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 24, 28, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+};
+
+static struct attn_hw_reg ucm_prty1_bb_a0 = {
+ 0, 31, ucm_prty1_bb_a0_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_bb_a0_attn_idx[7] = {
+ 50, 51, 52, 27, 53, 23, 22,
+};
+
+static struct attn_hw_reg ucm_prty2_bb_a0 = {
+ 1, 7, ucm_prty2_bb_a0_attn_idx, 0x1280210, 0x128021c, 0x1280218,
+ 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_bb_a0_regs[2] = {
+ &ucm_prty1_bb_a0, &ucm_prty2_bb_a0,
+};
+
+static const u16 ucm_prty1_bb_b0_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ucm_prty1_bb_b0 = {
+ 0, 31, ucm_prty1_bb_b0_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_bb_b0_attn_idx[7] = {
+ 48, 40, 41, 49, 43, 50, 51,
+};
+
+static struct attn_hw_reg ucm_prty2_bb_b0 = {
+ 1, 7, ucm_prty2_bb_b0_attn_idx, 0x1280210, 0x128021c, 0x1280218,
+ 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
+ &ucm_prty1_bb_b0, &ucm_prty2_bb_b0,
+};
+
+static const u16 ucm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ucm_prty1_k2 = {
+ 0, 31, ucm_prty1_k2_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+ 0x1280204
+};
+
+static const u16 ucm_prty2_k2_attn_idx[7] = {
+ 48, 40, 41, 49, 43, 50, 51,
+};
+
+static struct attn_hw_reg ucm_prty2_k2 = {
+ 1, 7, ucm_prty2_k2_attn_idx, 0x1280210, 0x128021c, 0x1280218, 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_k2_regs[2] = {
+ &ucm_prty1_k2, &ucm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xcm_int_attn_desc[49] = {
+ "xcm_address_error",
+ "xcm_is_storm_ovfl_err",
+ "xcm_is_storm_under_err",
+ "xcm_is_msdm_ovfl_err",
+ "xcm_is_msdm_under_err",
+ "xcm_is_xsdm_ovfl_err",
+ "xcm_is_xsdm_under_err",
+ "xcm_is_ysdm_ovfl_err",
+ "xcm_is_ysdm_under_err",
+ "xcm_is_usdm_ovfl_err",
+ "xcm_is_usdm_under_err",
+ "xcm_is_msem_ovfl_err",
+ "xcm_is_msem_under_err",
+ "xcm_is_usem_ovfl_err",
+ "xcm_is_usem_under_err",
+ "xcm_is_ysem_ovfl_err",
+ "xcm_is_ysem_under_err",
+ "xcm_is_dorq_ovfl_err",
+ "xcm_is_dorq_under_err",
+ "xcm_is_pbf_ovfl_err",
+ "xcm_is_pbf_under_err",
+ "xcm_is_tm_ovfl_err",
+ "xcm_is_tm_under_err",
+ "xcm_is_qm_p_ovfl_err",
+ "xcm_is_qm_p_under_err",
+ "xcm_is_qm_s_ovfl_err",
+ "xcm_is_qm_s_under_err",
+ "xcm_is_grc_ovfl_err0",
+ "xcm_is_grc_under_err0",
+ "xcm_is_grc_ovfl_err1",
+ "xcm_is_grc_under_err1",
+ "xcm_is_grc_ovfl_err2",
+ "xcm_is_grc_under_err2",
+ "xcm_is_grc_ovfl_err3",
+ "xcm_is_grc_under_err3",
+ "xcm_in_prcs_tbl_ovfl",
+ "xcm_agg_con_data_buf_ovfl",
+ "xcm_agg_con_cmd_buf_ovfl",
+ "xcm_sm_con_data_buf_ovfl",
+ "xcm_sm_con_cmd_buf_ovfl",
+ "xcm_fi_desc_input_violate",
+ "xcm_qm_act_st_cnt_msg_prcs_under",
+ "xcm_qm_act_st_cnt_msg_prcs_ovfl",
+ "xcm_qm_act_st_cnt_ext_ld_under",
+ "xcm_qm_act_st_cnt_ext_ld_ovfl",
+ "xcm_qm_act_st_cnt_rbc_under",
+ "xcm_qm_act_st_cnt_rbc_ovfl",
+ "xcm_qm_act_st_cnt_drop_under",
+ "xcm_qm_act_st_cnt_illeg_pqnum",
+};
+#else
+#define xcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xcm_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_bb_a0 = {
+ 0, 16, xcm_int0_bb_a0_attn_idx, 0x1000180, 0x100018c, 0x1000188,
+ 0x1000184
+};
+
+static const u16 xcm_int1_bb_a0_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_bb_a0 = {
+ 1, 25, xcm_int1_bb_a0_attn_idx, 0x1000190, 0x100019c, 0x1000198,
+ 0x1000194
+};
+
+static const u16 xcm_int2_bb_a0_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_bb_a0 = {
+ 2, 8, xcm_int2_bb_a0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8,
+ 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_bb_a0_regs[3] = {
+ &xcm_int0_bb_a0, &xcm_int1_bb_a0, &xcm_int2_bb_a0,
+};
+
+static const u16 xcm_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_bb_b0 = {
+ 0, 16, xcm_int0_bb_b0_attn_idx, 0x1000180, 0x100018c, 0x1000188,
+ 0x1000184
+};
+
+static const u16 xcm_int1_bb_b0_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_bb_b0 = {
+ 1, 25, xcm_int1_bb_b0_attn_idx, 0x1000190, 0x100019c, 0x1000198,
+ 0x1000194
+};
+
+static const u16 xcm_int2_bb_b0_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_bb_b0 = {
+ 2, 8, xcm_int2_bb_b0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8,
+ 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
+ &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0,
+};
+
+static const u16 xcm_int0_k2_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_k2 = {
+ 0, 16, xcm_int0_k2_attn_idx, 0x1000180, 0x100018c, 0x1000188, 0x1000184
+};
+
+static const u16 xcm_int1_k2_attn_idx[25] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_k2 = {
+ 1, 25, xcm_int1_k2_attn_idx, 0x1000190, 0x100019c, 0x1000198, 0x1000194
+};
+
+static const u16 xcm_int2_k2_attn_idx[8] = {
+ 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_k2 = {
+ 2, 8, xcm_int2_k2_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_k2_regs[3] = {
+ &xcm_int0_k2, &xcm_int1_k2, &xcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xcm_prty_attn_desc[59] = {
+ "xcm_mem036_i_ecc_rf_int",
+ "xcm_mem003_i_ecc_0_rf_int",
+ "xcm_mem003_i_ecc_1_rf_int",
+ "xcm_mem003_i_ecc_2_rf_int",
+ "xcm_mem003_i_ecc_3_rf_int",
+ "xcm_mem004_i_ecc_rf_int",
+ "xcm_mem033_i_ecc_0_rf_int",
+ "xcm_mem033_i_ecc_1_rf_int",
+ "xcm_mem034_i_ecc_rf_int",
+ "xcm_mem026_i_mem_prty",
+ "xcm_mem025_i_mem_prty",
+ "xcm_mem022_i_mem_prty",
+ "xcm_mem029_i_mem_prty",
+ "xcm_mem023_i_mem_prty",
+ "xcm_mem028_i_mem_prty",
+ "xcm_mem030_i_mem_prty",
+ "xcm_mem017_i_mem_prty",
+ "xcm_mem024_i_mem_prty",
+ "xcm_mem027_i_mem_prty",
+ "xcm_mem018_i_mem_prty",
+ "xcm_mem019_i_mem_prty",
+ "xcm_mem020_i_mem_prty",
+ "xcm_mem021_i_mem_prty",
+ "xcm_mem039_i_mem_prty",
+ "xcm_mem038_i_mem_prty",
+ "xcm_mem037_i_mem_prty",
+ "xcm_mem005_i_mem_prty",
+ "xcm_mem035_i_mem_prty",
+ "xcm_mem031_i_mem_prty",
+ "xcm_mem006_i_mem_prty",
+ "xcm_mem015_i_mem_prty",
+ "xcm_mem035_i_ecc_rf_int",
+ "xcm_mem032_i_ecc_0_rf_int",
+ "xcm_mem032_i_ecc_1_rf_int",
+ "xcm_mem033_i_ecc_rf_int",
+ "xcm_mem036_i_mem_prty",
+ "xcm_mem034_i_mem_prty",
+ "xcm_mem016_i_mem_prty",
+ "xcm_mem002_i_ecc_0_rf_int",
+ "xcm_mem002_i_ecc_1_rf_int",
+ "xcm_mem002_i_ecc_2_rf_int",
+ "xcm_mem002_i_ecc_3_rf_int",
+ "xcm_mem003_i_ecc_rf_int",
+ "xcm_mem031_i_ecc_0_rf_int",
+ "xcm_mem031_i_ecc_1_rf_int",
+ "xcm_mem032_i_ecc_rf_int",
+ "xcm_mem004_i_mem_prty",
+ "xcm_mem033_i_mem_prty",
+ "xcm_mem014_i_mem_prty",
+ "xcm_mem032_i_mem_prty",
+ "xcm_mem007_i_mem_prty",
+ "xcm_mem008_i_mem_prty",
+ "xcm_mem009_i_mem_prty",
+ "xcm_mem010_i_mem_prty",
+ "xcm_mem011_i_mem_prty",
+ "xcm_mem012_i_mem_prty",
+ "xcm_mem013_i_mem_prty",
+ "xcm_mem001_i_mem_prty",
+ "xcm_mem002_i_mem_prty",
+};
+#else
+#define xcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xcm_prty1_bb_a0_attn_idx[31] = {
+ 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 25, 26, 27, 30,
+ 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_prty1_bb_a0 = {
+ 0, 31, xcm_prty1_bb_a0_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_bb_a0_attn_idx[11] = {
+ 50, 51, 52, 53, 54, 55, 56, 57, 15, 29, 24,
+};
+
+static struct attn_hw_reg xcm_prty2_bb_a0 = {
+ 1, 11, xcm_prty2_bb_a0_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_bb_a0_regs[2] = {
+ &xcm_prty1_bb_a0, &xcm_prty2_bb_a0,
+};
+
+static const u16 xcm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 24, 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+};
+
+static struct attn_hw_reg xcm_prty1_bb_b0 = {
+ 0, 31, xcm_prty1_bb_b0_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_bb_b0_attn_idx[11] = {
+ 50, 51, 52, 53, 54, 55, 56, 48, 57, 58, 28,
+};
+
+static struct attn_hw_reg xcm_prty2_bb_b0 = {
+ 1, 11, xcm_prty2_bb_b0_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
+ &xcm_prty1_bb_b0, &xcm_prty2_bb_b0,
+};
+
+static const u16 xcm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg xcm_prty1_k2 = {
+ 0, 31, xcm_prty1_k2_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+ 0x1000204
+};
+
+static const u16 xcm_prty2_k2_attn_idx[12] = {
+ 37, 49, 50, 51, 52, 53, 54, 55, 56, 48, 57, 58,
+};
+
+static struct attn_hw_reg xcm_prty2_k2 = {
+ 1, 12, xcm_prty2_k2_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+ 0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_k2_regs[2] = {
+ &xcm_prty1_k2, &xcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ycm_int_attn_desc[37] = {
+ "ycm_address_error",
+ "ycm_is_storm_ovfl_err",
+ "ycm_is_storm_under_err",
+ "ycm_is_msdm_ovfl_err",
+ "ycm_is_msdm_under_err",
+ "ycm_is_ysdm_ovfl_err",
+ "ycm_is_ysdm_under_err",
+ "ycm_is_xyld_ovfl_err",
+ "ycm_is_xyld_under_err",
+ "ycm_is_msem_ovfl_err",
+ "ycm_is_msem_under_err",
+ "ycm_is_usem_ovfl_err",
+ "ycm_is_usem_under_err",
+ "ycm_is_pbf_ovfl_err",
+ "ycm_is_pbf_under_err",
+ "ycm_is_qm_p_ovfl_err",
+ "ycm_is_qm_p_under_err",
+ "ycm_is_qm_s_ovfl_err",
+ "ycm_is_qm_s_under_err",
+ "ycm_is_grc_ovfl_err0",
+ "ycm_is_grc_under_err0",
+ "ycm_is_grc_ovfl_err1",
+ "ycm_is_grc_under_err1",
+ "ycm_is_grc_ovfl_err2",
+ "ycm_is_grc_under_err2",
+ "ycm_is_grc_ovfl_err3",
+ "ycm_is_grc_under_err3",
+ "ycm_in_prcs_tbl_ovfl",
+ "ycm_sm_con_data_buf_ovfl",
+ "ycm_sm_con_cmd_buf_ovfl",
+ "ycm_agg_task_data_buf_ovfl",
+ "ycm_agg_task_cmd_buf_ovfl",
+ "ycm_sm_task_data_buf_ovfl",
+ "ycm_sm_task_cmd_buf_ovfl",
+ "ycm_fi_desc_input_violate",
+ "ycm_se_desc_input_violate",
+ "ycm_qmreg_more4",
+};
+#else
+#define ycm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ycm_int0_bb_a0_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_bb_a0 = {
+ 0, 13, ycm_int0_bb_a0_attn_idx, 0x1080180, 0x108018c, 0x1080188,
+ 0x1080184
+};
+
+static const u16 ycm_int1_bb_a0_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_bb_a0 = {
+ 1, 23, ycm_int1_bb_a0_attn_idx, 0x1080190, 0x108019c, 0x1080198,
+ 0x1080194
+};
+
+static const u16 ycm_int2_bb_a0_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_bb_a0 = {
+ 2, 1, ycm_int2_bb_a0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8,
+ 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_bb_a0_regs[3] = {
+ &ycm_int0_bb_a0, &ycm_int1_bb_a0, &ycm_int2_bb_a0,
+};
+
+static const u16 ycm_int0_bb_b0_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_bb_b0 = {
+ 0, 13, ycm_int0_bb_b0_attn_idx, 0x1080180, 0x108018c, 0x1080188,
+ 0x1080184
+};
+
+static const u16 ycm_int1_bb_b0_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_bb_b0 = {
+ 1, 23, ycm_int1_bb_b0_attn_idx, 0x1080190, 0x108019c, 0x1080198,
+ 0x1080194
+};
+
+static const u16 ycm_int2_bb_b0_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_bb_b0 = {
+ 2, 1, ycm_int2_bb_b0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8,
+ 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
+ &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0,
+};
+
+static const u16 ycm_int0_k2_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_k2 = {
+ 0, 13, ycm_int0_k2_attn_idx, 0x1080180, 0x108018c, 0x1080188, 0x1080184
+};
+
+static const u16 ycm_int1_k2_attn_idx[23] = {
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_k2 = {
+ 1, 23, ycm_int1_k2_attn_idx, 0x1080190, 0x108019c, 0x1080198, 0x1080194
+};
+
+static const u16 ycm_int2_k2_attn_idx[1] = {
+ 36,
+};
+
+static struct attn_hw_reg ycm_int2_k2 = {
+ 2, 1, ycm_int2_k2_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_k2_regs[3] = {
+ &ycm_int0_k2, &ycm_int1_k2, &ycm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ycm_prty_attn_desc[44] = {
+ "ycm_mem027_i_ecc_rf_int",
+ "ycm_mem003_i_ecc_0_rf_int",
+ "ycm_mem003_i_ecc_1_rf_int",
+ "ycm_mem022_i_ecc_0_rf_int",
+ "ycm_mem022_i_ecc_1_rf_int",
+ "ycm_mem023_i_ecc_rf_int",
+ "ycm_mem005_i_ecc_0_rf_int",
+ "ycm_mem005_i_ecc_1_rf_int",
+ "ycm_mem025_i_ecc_0_rf_int",
+ "ycm_mem025_i_ecc_1_rf_int",
+ "ycm_mem018_i_mem_prty",
+ "ycm_mem020_i_mem_prty",
+ "ycm_mem017_i_mem_prty",
+ "ycm_mem016_i_mem_prty",
+ "ycm_mem019_i_mem_prty",
+ "ycm_mem015_i_mem_prty",
+ "ycm_mem011_i_mem_prty",
+ "ycm_mem012_i_mem_prty",
+ "ycm_mem013_i_mem_prty",
+ "ycm_mem014_i_mem_prty",
+ "ycm_mem030_i_mem_prty",
+ "ycm_mem029_i_mem_prty",
+ "ycm_mem028_i_mem_prty",
+ "ycm_mem004_i_mem_prty",
+ "ycm_mem024_i_mem_prty",
+ "ycm_mem006_i_mem_prty",
+ "ycm_mem026_i_mem_prty",
+ "ycm_mem021_i_mem_prty",
+ "ycm_mem007_i_mem_prty_0",
+ "ycm_mem007_i_mem_prty_1",
+ "ycm_mem008_i_mem_prty",
+ "ycm_mem026_i_ecc_rf_int",
+ "ycm_mem021_i_ecc_0_rf_int",
+ "ycm_mem021_i_ecc_1_rf_int",
+ "ycm_mem022_i_ecc_rf_int",
+ "ycm_mem024_i_ecc_0_rf_int",
+ "ycm_mem024_i_ecc_1_rf_int",
+ "ycm_mem027_i_mem_prty",
+ "ycm_mem023_i_mem_prty",
+ "ycm_mem025_i_mem_prty",
+ "ycm_mem009_i_mem_prty",
+ "ycm_mem010_i_mem_prty",
+ "ycm_mem001_i_mem_prty",
+ "ycm_mem002_i_mem_prty",
+};
+#else
+#define ycm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ycm_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg ycm_prty1_bb_a0 = {
+ 0, 31, ycm_prty1_bb_a0_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_bb_a0_attn_idx[3] = {
+ 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_bb_a0 = {
+ 1, 3, ycm_prty2_bb_a0_attn_idx, 0x1080210, 0x108021c, 0x1080218,
+ 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_bb_a0_regs[2] = {
+ &ycm_prty1_bb_a0, &ycm_prty2_bb_a0,
+};
+
+static const u16 ycm_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg ycm_prty1_bb_b0 = {
+ 0, 31, ycm_prty1_bb_b0_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_bb_b0_attn_idx[3] = {
+ 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_bb_b0 = {
+ 1, 3, ycm_prty2_bb_b0_attn_idx, 0x1080210, 0x108021c, 0x1080218,
+ 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
+ &ycm_prty1_bb_b0, &ycm_prty2_bb_b0,
+};
+
+static const u16 ycm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ycm_prty1_k2 = {
+ 0, 31, ycm_prty1_k2_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+ 0x1080204
+};
+
+static const u16 ycm_prty2_k2_attn_idx[4] = {
+ 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_k2 = {
+ 1, 4, ycm_prty2_k2_attn_idx, 0x1080210, 0x108021c, 0x1080218, 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_k2_regs[2] = {
+ &ycm_prty1_k2, &ycm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcm_int_attn_desc[20] = {
+ "pcm_address_error",
+ "pcm_is_storm_ovfl_err",
+ "pcm_is_storm_under_err",
+ "pcm_is_psdm_ovfl_err",
+ "pcm_is_psdm_under_err",
+ "pcm_is_pbf_ovfl_err",
+ "pcm_is_pbf_under_err",
+ "pcm_is_grc_ovfl_err0",
+ "pcm_is_grc_under_err0",
+ "pcm_is_grc_ovfl_err1",
+ "pcm_is_grc_under_err1",
+ "pcm_is_grc_ovfl_err2",
+ "pcm_is_grc_under_err2",
+ "pcm_is_grc_ovfl_err3",
+ "pcm_is_grc_under_err3",
+ "pcm_in_prcs_tbl_ovfl",
+ "pcm_sm_con_data_buf_ovfl",
+ "pcm_sm_con_cmd_buf_ovfl",
+ "pcm_fi_desc_input_violate",
+ "pcm_qmreg_more4",
+};
+#else
+#define pcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcm_int0_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_bb_a0 = {
+ 0, 5, pcm_int0_bb_a0_attn_idx, 0x1100180, 0x110018c, 0x1100188,
+ 0x1100184
+};
+
+static const u16 pcm_int1_bb_a0_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_bb_a0 = {
+ 1, 14, pcm_int1_bb_a0_attn_idx, 0x1100190, 0x110019c, 0x1100198,
+ 0x1100194
+};
+
+static const u16 pcm_int2_bb_a0_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_bb_a0 = {
+ 2, 1, pcm_int2_bb_a0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8,
+ 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_bb_a0_regs[3] = {
+ &pcm_int0_bb_a0, &pcm_int1_bb_a0, &pcm_int2_bb_a0,
+};
+
+static const u16 pcm_int0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_bb_b0 = {
+ 0, 5, pcm_int0_bb_b0_attn_idx, 0x1100180, 0x110018c, 0x1100188,
+ 0x1100184
+};
+
+static const u16 pcm_int1_bb_b0_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_bb_b0 = {
+ 1, 14, pcm_int1_bb_b0_attn_idx, 0x1100190, 0x110019c, 0x1100198,
+ 0x1100194
+};
+
+static const u16 pcm_int2_bb_b0_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_bb_b0 = {
+ 2, 1, pcm_int2_bb_b0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8,
+ 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
+ &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0,
+};
+
+static const u16 pcm_int0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_k2 = {
+ 0, 5, pcm_int0_k2_attn_idx, 0x1100180, 0x110018c, 0x1100188, 0x1100184
+};
+
+static const u16 pcm_int1_k2_attn_idx[14] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_k2 = {
+ 1, 14, pcm_int1_k2_attn_idx, 0x1100190, 0x110019c, 0x1100198, 0x1100194
+};
+
+static const u16 pcm_int2_k2_attn_idx[1] = {
+ 19,
+};
+
+static struct attn_hw_reg pcm_int2_k2 = {
+ 2, 1, pcm_int2_k2_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_k2_regs[3] = {
+ &pcm_int0_k2, &pcm_int1_k2, &pcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pcm_prty_attn_desc[18] = {
+ "pcm_mem012_i_ecc_rf_int",
+ "pcm_mem010_i_ecc_0_rf_int",
+ "pcm_mem010_i_ecc_1_rf_int",
+ "pcm_mem008_i_mem_prty",
+ "pcm_mem007_i_mem_prty",
+ "pcm_mem006_i_mem_prty",
+ "pcm_mem002_i_mem_prty",
+ "pcm_mem003_i_mem_prty",
+ "pcm_mem004_i_mem_prty",
+ "pcm_mem005_i_mem_prty",
+ "pcm_mem011_i_mem_prty",
+ "pcm_mem001_i_mem_prty",
+ "pcm_mem011_i_ecc_rf_int",
+ "pcm_mem009_i_ecc_0_rf_int",
+ "pcm_mem009_i_ecc_1_rf_int",
+ "pcm_mem010_i_mem_prty",
+ "pcm_mem013_i_mem_prty",
+ "pcm_mem012_i_mem_prty",
+};
+#else
+#define pcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcm_prty1_bb_a0_attn_idx[14] = {
+ 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pcm_prty1_bb_a0 = {
+ 0, 14, pcm_prty1_bb_a0_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_bb_a0_regs[1] = {
+ &pcm_prty1_bb_a0,
+};
+
+static const u16 pcm_prty1_bb_b0_attn_idx[11] = {
+ 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pcm_prty1_bb_b0 = {
+ 0, 11, pcm_prty1_bb_b0_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
+ &pcm_prty1_bb_b0,
+};
+
+static const u16 pcm_prty1_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg pcm_prty1_k2 = {
+ 0, 12, pcm_prty1_k2_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+ 0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_k2_regs[1] = {
+ &pcm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *qm_int_attn_desc[22] = {
+ "qm_address_error",
+ "qm_ovf_err_tx",
+ "qm_ovf_err_other",
+ "qm_pf_usg_cnt_err",
+ "qm_vf_usg_cnt_err",
+ "qm_voq_crd_inc_err",
+ "qm_voq_crd_dec_err",
+ "qm_byte_crd_inc_err",
+ "qm_byte_crd_dec_err",
+ "qm_err_incdec_rlglblcrd",
+ "qm_err_incdec_rlpfcrd",
+ "qm_err_incdec_wfqpfcrd",
+ "qm_err_incdec_wfqvpcrd",
+ "qm_err_incdec_voqlinecrd",
+ "qm_err_incdec_voqbytecrd",
+ "qm_fifos_error",
+ "qm_qm_rl_dc_exp_pf_controller_pop_error",
+ "qm_qm_rl_dc_exp_pf_controller_push_error",
+ "qm_qm_rl_dc_rf_req_controller_pop_error",
+ "qm_qm_rl_dc_rf_req_controller_push_error",
+ "qm_qm_rl_dc_rf_res_controller_pop_error",
+ "qm_qm_rl_dc_rf_res_controller_push_error",
+};
+#else
+#define qm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 qm_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg qm_int0_bb_a0 = {
+ 0, 16, qm_int0_bb_a0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_bb_a0_regs[1] = {
+ &qm_int0_bb_a0,
+};
+
+static const u16 qm_int0_bb_b0_attn_idx[22] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21,
+};
+
+static struct attn_hw_reg qm_int0_bb_b0 = {
+ 0, 22, qm_int0_bb_b0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
+ &qm_int0_bb_b0,
+};
+
+static const u16 qm_int0_k2_attn_idx[22] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21,
+};
+
+static struct attn_hw_reg qm_int0_k2 = {
+ 0, 22, qm_int0_k2_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_k2_regs[1] = {
+ &qm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *qm_prty_attn_desc[109] = {
+ "qm_xcm_wrc_fifo",
+ "qm_ucm_wrc_fifo",
+ "qm_tcm_wrc_fifo",
+ "qm_ccm_wrc_fifo",
+ "qm_bigramhigh",
+ "qm_bigramlow",
+ "qm_base_address",
+ "qm_wrbuff",
+ "qm_bigramhigh_ext_a",
+ "qm_bigramlow_ext_a",
+ "qm_base_address_ext_a",
+ "qm_mem006_i_ecc_0_rf_int",
+ "qm_mem006_i_ecc_1_rf_int",
+ "qm_mem005_i_ecc_0_rf_int",
+ "qm_mem005_i_ecc_1_rf_int",
+ "qm_mem012_i_ecc_rf_int",
+ "qm_mem037_i_mem_prty",
+ "qm_mem036_i_mem_prty",
+ "qm_mem039_i_mem_prty",
+ "qm_mem038_i_mem_prty",
+ "qm_mem040_i_mem_prty",
+ "qm_mem042_i_mem_prty",
+ "qm_mem041_i_mem_prty",
+ "qm_mem056_i_mem_prty",
+ "qm_mem055_i_mem_prty",
+ "qm_mem053_i_mem_prty",
+ "qm_mem054_i_mem_prty",
+ "qm_mem057_i_mem_prty",
+ "qm_mem058_i_mem_prty",
+ "qm_mem062_i_mem_prty",
+ "qm_mem061_i_mem_prty",
+ "qm_mem059_i_mem_prty",
+ "qm_mem060_i_mem_prty",
+ "qm_mem063_i_mem_prty",
+ "qm_mem064_i_mem_prty",
+ "qm_mem033_i_mem_prty",
+ "qm_mem032_i_mem_prty",
+ "qm_mem030_i_mem_prty",
+ "qm_mem031_i_mem_prty",
+ "qm_mem034_i_mem_prty",
+ "qm_mem035_i_mem_prty",
+ "qm_mem051_i_mem_prty",
+ "qm_mem042_i_ecc_0_rf_int",
+ "qm_mem042_i_ecc_1_rf_int",
+ "qm_mem041_i_ecc_0_rf_int",
+ "qm_mem041_i_ecc_1_rf_int",
+ "qm_mem048_i_ecc_rf_int",
+ "qm_mem009_i_mem_prty",
+ "qm_mem008_i_mem_prty",
+ "qm_mem011_i_mem_prty",
+ "qm_mem010_i_mem_prty",
+ "qm_mem012_i_mem_prty",
+ "qm_mem014_i_mem_prty",
+ "qm_mem013_i_mem_prty",
+ "qm_mem028_i_mem_prty",
+ "qm_mem027_i_mem_prty",
+ "qm_mem025_i_mem_prty",
+ "qm_mem026_i_mem_prty",
+ "qm_mem029_i_mem_prty",
+ "qm_mem005_i_mem_prty",
+ "qm_mem004_i_mem_prty",
+ "qm_mem002_i_mem_prty",
+ "qm_mem003_i_mem_prty",
+ "qm_mem006_i_mem_prty",
+ "qm_mem007_i_mem_prty",
+ "qm_mem023_i_mem_prty",
+ "qm_mem047_i_mem_prty",
+ "qm_mem049_i_mem_prty",
+ "qm_mem048_i_mem_prty",
+ "qm_mem052_i_mem_prty",
+ "qm_mem050_i_mem_prty",
+ "qm_mem045_i_mem_prty",
+ "qm_mem046_i_mem_prty",
+ "qm_mem043_i_mem_prty",
+ "qm_mem044_i_mem_prty",
+ "qm_mem017_i_mem_prty",
+ "qm_mem016_i_mem_prty",
+ "qm_mem021_i_mem_prty",
+ "qm_mem024_i_mem_prty",
+ "qm_mem019_i_mem_prty",
+ "qm_mem018_i_mem_prty",
+ "qm_mem015_i_mem_prty",
+ "qm_mem022_i_mem_prty",
+ "qm_mem020_i_mem_prty",
+ "qm_mem007_i_mem_prty_0",
+ "qm_mem007_i_mem_prty_1",
+ "qm_mem007_i_mem_prty_2",
+ "qm_mem001_i_mem_prty",
+ "qm_mem043_i_mem_prty_0",
+ "qm_mem043_i_mem_prty_1",
+ "qm_mem043_i_mem_prty_2",
+ "qm_mem007_i_mem_prty_3",
+ "qm_mem007_i_mem_prty_4",
+ "qm_mem007_i_mem_prty_5",
+ "qm_mem007_i_mem_prty_6",
+ "qm_mem007_i_mem_prty_7",
+ "qm_mem007_i_mem_prty_8",
+ "qm_mem007_i_mem_prty_9",
+ "qm_mem007_i_mem_prty_10",
+ "qm_mem007_i_mem_prty_11",
+ "qm_mem007_i_mem_prty_12",
+ "qm_mem007_i_mem_prty_13",
+ "qm_mem007_i_mem_prty_14",
+ "qm_mem007_i_mem_prty_15",
+ "qm_mem043_i_mem_prty_3",
+ "qm_mem043_i_mem_prty_4",
+ "qm_mem043_i_mem_prty_5",
+ "qm_mem043_i_mem_prty_6",
+ "qm_mem043_i_mem_prty_7",
+};
+#else
+#define qm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 qm_prty0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_bb_a0 = {
+ 0, 11, qm_prty0_bb_a0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_bb_a0_attn_idx[31] = {
+ 17, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+};
+
+static struct attn_hw_reg qm_prty1_bb_a0 = {
+ 1, 31, qm_prty1_bb_a0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_bb_a0_attn_idx[31] = {
+ 66, 67, 69, 70, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 87, 20, 18, 25,
+ 27, 32, 24, 26, 41, 31, 29, 28, 30, 23, 88, 89, 90,
+};
+
+static struct attn_hw_reg qm_prty2_bb_a0 = {
+ 2, 31, qm_prty2_bb_a0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_bb_a0_attn_idx[11] = {
+ 104, 105, 106, 107, 108, 33, 16, 34, 19, 72, 71,
+};
+
+static struct attn_hw_reg qm_prty3_bb_a0 = {
+ 3, 11, qm_prty3_bb_a0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_bb_a0_regs[4] = {
+ &qm_prty0_bb_a0, &qm_prty1_bb_a0, &qm_prty2_bb_a0, &qm_prty3_bb_a0,
+};
+
+static const u16 qm_prty0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_bb_b0 = {
+ 0, 11, qm_prty0_bb_b0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_bb_b0_attn_idx[31] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg qm_prty1_bb_b0 = {
+ 1, 31, qm_prty1_bb_b0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_bb_b0_attn_idx[31] = {
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78,
+ 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86,
+};
+
+static struct attn_hw_reg qm_prty2_bb_b0 = {
+ 2, 31, qm_prty2_bb_b0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_bb_b0_attn_idx[11] = {
+ 91, 92, 93, 94, 95, 55, 87, 54, 61, 50, 47,
+};
+
+static struct attn_hw_reg qm_prty3_bb_b0 = {
+ 3, 11, qm_prty3_bb_b0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
+ &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0,
+};
+
+static const u16 qm_prty0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_k2 = {
+ 0, 11, qm_prty0_k2_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_k2_attn_idx[31] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg qm_prty1_k2 = {
+ 1, 31, qm_prty1_k2_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_k2_attn_idx[31] = {
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78,
+ 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86,
+};
+
+static struct attn_hw_reg qm_prty2_k2 = {
+ 2, 31, qm_prty2_k2_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_k2_attn_idx[19] = {
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 55, 87, 54, 61,
+ 50, 47,
+};
+
+static struct attn_hw_reg qm_prty3_k2 = {
+ 3, 19, qm_prty3_k2_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_k2_regs[4] = {
+ &qm_prty0_k2, &qm_prty1_k2, &qm_prty2_k2, &qm_prty3_k2,
+};
+
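+/* Note how the per-chip QM variants share register addresses while the
+ * valid-bit count and bit-to-name mapping differ: qm_prty3_bb_b0 above
+ * exposes 11 bits at 0x2f0220, qm_prty3_k2 exposes 19 at the same address.
+ */
+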
+#ifdef ATTN_DESC
+static const char *tm_int_attn_desc[43] = {
+ "tm_address_error",
+ "tm_pxp_read_data_fifo_ov",
+ "tm_pxp_read_data_fifo_un",
+ "tm_pxp_read_ctrl_fifo_ov",
+ "tm_pxp_read_ctrl_fifo_un",
+ "tm_cfc_load_command_fifo_ov",
+ "tm_cfc_load_command_fifo_un",
+ "tm_cfc_load_echo_fifo_ov",
+ "tm_cfc_load_echo_fifo_un",
+ "tm_client_out_fifo_ov",
+ "tm_client_out_fifo_un",
+ "tm_ac_command_fifo_ov",
+ "tm_ac_command_fifo_un",
+ "tm_client_in_pbf_fifo_ov",
+ "tm_client_in_pbf_fifo_un",
+ "tm_client_in_ucm_fifo_ov",
+ "tm_client_in_ucm_fifo_un",
+ "tm_client_in_tcm_fifo_ov",
+ "tm_client_in_tcm_fifo_un",
+ "tm_client_in_xcm_fifo_ov",
+ "tm_client_in_xcm_fifo_un",
+ "tm_expiration_cmd_fifo_ov",
+ "tm_expiration_cmd_fifo_un",
+ "tm_stop_all_lc_invalid",
+ "tm_command_lc_invalid_0",
+ "tm_command_lc_invalid_1",
+ "tm_init_command_lc_valid",
+ "tm_stop_all_exp_lc_valid",
+ "tm_command_cid_invalid_0",
+ "tm_reserved_command",
+ "tm_command_cid_invalid_1",
+ "tm_cload_res_loaderr_conn",
+ "tm_cload_res_loadcancel_conn",
+ "tm_cload_res_validerr_conn",
+ "tm_context_rd_last",
+ "tm_context_wr_last",
+ "tm_pxp_rd_data_eop_bvalid",
+ "tm_pend_conn_scan",
+ "tm_pend_task_scan",
+ "tm_pxp_rd_data_eop_error",
+ "tm_cload_res_loaderr_task",
+ "tm_cload_res_loadcancel_task",
+ "tm_cload_res_validerr_task",
+};
+#else
+#define tm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tm_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_bb_a0 = {
+ 0, 32, tm_int0_bb_a0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_bb_a0_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_bb_a0 = {
+ 1, 11, tm_int1_bb_a0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_bb_a0_regs[2] = {
+ &tm_int0_bb_a0, &tm_int1_bb_a0,
+};
+
+static const u16 tm_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_bb_b0 = {
+ 0, 32, tm_int0_bb_b0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_bb_b0_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_bb_b0 = {
+ 1, 11, tm_int1_bb_b0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
+ &tm_int0_bb_b0, &tm_int1_bb_b0,
+};
+
+static const u16 tm_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_k2 = {
+ 0, 32, tm_int0_k2_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_k2_attn_idx[11] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_k2 = {
+ 1, 11, tm_int1_k2_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_k2_regs[2] = {
+ &tm_int0_k2, &tm_int1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tm_prty_attn_desc[17] = {
+ "tm_mem012_i_ecc_0_rf_int",
+ "tm_mem012_i_ecc_1_rf_int",
+ "tm_mem003_i_ecc_rf_int",
+ "tm_mem016_i_mem_prty",
+ "tm_mem007_i_mem_prty",
+ "tm_mem010_i_mem_prty",
+ "tm_mem008_i_mem_prty",
+ "tm_mem009_i_mem_prty",
+ "tm_mem013_i_mem_prty",
+ "tm_mem015_i_mem_prty",
+ "tm_mem014_i_mem_prty",
+ "tm_mem004_i_mem_prty",
+ "tm_mem005_i_mem_prty",
+ "tm_mem006_i_mem_prty",
+ "tm_mem011_i_mem_prty",
+ "tm_mem001_i_mem_prty",
+ "tm_mem002_i_mem_prty",
+};
+#else
+#define tm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tm_prty1_bb_a0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_bb_a0 = {
+ 0, 17, tm_prty1_bb_a0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_bb_a0_regs[1] = {
+ &tm_prty1_bb_a0,
+};
+
+static const u16 tm_prty1_bb_b0_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_bb_b0 = {
+ 0, 17, tm_prty1_bb_b0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
+ &tm_prty1_bb_b0,
+};
+
+static const u16 tm_prty1_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_k2 = {
+ 0, 17, tm_prty1_k2_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_k2_regs[1] = {
+ &tm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dorq_int_attn_desc[9] = {
+ "dorq_address_error",
+ "dorq_db_drop",
+ "dorq_dorq_fifo_ovfl_err",
+ "dorq_dorq_fifo_afull",
+ "dorq_cfc_byp_validation_err",
+ "dorq_cfc_ld_resp_err",
+ "dorq_xcm_done_cnt_err",
+ "dorq_cfc_ld_req_fifo_ovfl_err",
+ "dorq_cfc_ld_req_fifo_under_err",
+};
+#else
+#define dorq_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dorq_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_bb_a0 = {
+ 0, 9, dorq_int0_bb_a0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_bb_a0_regs[1] = {
+ &dorq_int0_bb_a0,
+};
+
+static const u16 dorq_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_bb_b0 = {
+ 0, 9, dorq_int0_bb_b0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
+ &dorq_int0_bb_b0,
+};
+
+static const u16 dorq_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_k2 = {
+ 0, 9, dorq_int0_k2_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_k2_regs[1] = {
+ &dorq_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dorq_prty_attn_desc[7] = {
+ "dorq_datapath_registers",
+ "dorq_mem002_i_ecc_rf_int",
+ "dorq_mem001_i_mem_prty",
+ "dorq_mem003_i_mem_prty",
+ "dorq_mem004_i_mem_prty",
+ "dorq_mem005_i_mem_prty",
+ "dorq_mem006_i_mem_prty",
+};
+#else
+#define dorq_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dorq_prty1_bb_a0_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_bb_a0 = {
+ 0, 6, dorq_prty1_bb_a0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_bb_a0_regs[1] = {
+ &dorq_prty1_bb_a0,
+};
+
+static const u16 dorq_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dorq_prty0_bb_b0 = {
+ 0, 1, dorq_prty0_bb_b0_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194
+};
+
+static const u16 dorq_prty1_bb_b0_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_bb_b0 = {
+ 1, 6, dorq_prty1_bb_b0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
+ &dorq_prty0_bb_b0, &dorq_prty1_bb_b0,
+};
+
+static const u16 dorq_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dorq_prty0_k2 = {
+ 0, 1, dorq_prty0_k2_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194
+};
+
+static const u16 dorq_prty1_k2_attn_idx[6] = {
+ 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_k2 = {
+ 1, 6, dorq_prty1_k2_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_k2_regs[2] = {
+ &dorq_prty0_k2, &dorq_prty1_k2,
+};
+
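+/* The B0 and K2 DORQ parity lists add a prty0 register at 0x100190 for the
+ * "dorq_datapath_registers" attention that A0 lacks, which is why
+ * dorq_prty_bb_a0_regs has one entry while the other two lists have two.
+ */
+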
+#ifdef ATTN_DESC
+static const char *brb_int_attn_desc[237] = {
+ "brb_address_error",
+ "brb_rc_pkt0_rls_error",
+ "brb_rc_pkt0_1st_error",
+ "brb_rc_pkt0_len_error",
+ "brb_rc_pkt0_middle_error",
+ "brb_rc_pkt0_protocol_error",
+ "brb_rc_pkt1_rls_error",
+ "brb_rc_pkt1_1st_error",
+ "brb_rc_pkt1_len_error",
+ "brb_rc_pkt1_middle_error",
+ "brb_rc_pkt1_protocol_error",
+ "brb_rc_pkt2_rls_error",
+ "brb_rc_pkt2_1st_error",
+ "brb_rc_pkt2_len_error",
+ "brb_rc_pkt2_middle_error",
+ "brb_rc_pkt2_protocol_error",
+ "brb_rc_pkt3_rls_error",
+ "brb_rc_pkt3_1st_error",
+ "brb_rc_pkt3_len_error",
+ "brb_rc_pkt3_middle_error",
+ "brb_rc_pkt3_protocol_error",
+ "brb_rc_sop_req_tc_port_error",
+ "brb_uncomplient_lossless_error",
+ "brb_wc0_protocol_error",
+ "brb_wc1_protocol_error",
+ "brb_wc2_protocol_error",
+ "brb_wc3_protocol_error",
+ "brb_ll_arb_prefetch_sop_error",
+ "brb_ll_blk_error",
+ "brb_packet_counter_error",
+ "brb_byte_counter_error",
+ "brb_mac0_fc_cnt_error",
+ "brb_mac1_fc_cnt_error",
+ "brb_ll_arb_calc_error",
+ "brb_unused_0",
+ "brb_wc0_inp_fifo_error",
+ "brb_wc0_sop_fifo_error",
+ "brb_unused_1",
+ "brb_wc0_eop_fifo_error",
+ "brb_wc0_queue_fifo_error",
+ "brb_wc0_free_point_fifo_error",
+ "brb_wc0_next_point_fifo_error",
+ "brb_wc0_strt_fifo_error",
+ "brb_wc0_second_dscr_fifo_error",
+ "brb_wc0_pkt_avail_fifo_error",
+ "brb_wc0_cos_cnt_fifo_error",
+ "brb_wc0_notify_fifo_error",
+ "brb_wc0_ll_req_fifo_error",
+ "brb_wc0_ll_pa_cnt_error",
+ "brb_wc0_bb_pa_cnt_error",
+ "brb_wc1_inp_fifo_error",
+ "brb_wc1_sop_fifo_error",
+ "brb_wc1_eop_fifo_error",
+ "brb_wc1_queue_fifo_error",
+ "brb_wc1_free_point_fifo_error",
+ "brb_wc1_next_point_fifo_error",
+ "brb_wc1_strt_fifo_error",
+ "brb_wc1_second_dscr_fifo_error",
+ "brb_wc1_pkt_avail_fifo_error",
+ "brb_wc1_cos_cnt_fifo_error",
+ "brb_wc1_notify_fifo_error",
+ "brb_wc1_ll_req_fifo_error",
+ "brb_wc1_ll_pa_cnt_error",
+ "brb_wc1_bb_pa_cnt_error",
+ "brb_wc2_inp_fifo_error",
+ "brb_wc2_sop_fifo_error",
+ "brb_wc2_eop_fifo_error",
+ "brb_wc2_queue_fifo_error",
+ "brb_wc2_free_point_fifo_error",
+ "brb_wc2_next_point_fifo_error",
+ "brb_wc2_strt_fifo_error",
+ "brb_wc2_second_dscr_fifo_error",
+ "brb_wc2_pkt_avail_fifo_error",
+ "brb_wc2_cos_cnt_fifo_error",
+ "brb_wc2_notify_fifo_error",
+ "brb_wc2_ll_req_fifo_error",
+ "brb_wc2_ll_pa_cnt_error",
+ "brb_wc2_bb_pa_cnt_error",
+ "brb_wc3_inp_fifo_error",
+ "brb_wc3_sop_fifo_error",
+ "brb_wc3_eop_fifo_error",
+ "brb_wc3_queue_fifo_error",
+ "brb_wc3_free_point_fifo_error",
+ "brb_wc3_next_point_fifo_error",
+ "brb_wc3_strt_fifo_error",
+ "brb_wc3_second_dscr_fifo_error",
+ "brb_wc3_pkt_avail_fifo_error",
+ "brb_wc3_cos_cnt_fifo_error",
+ "brb_wc3_notify_fifo_error",
+ "brb_wc3_ll_req_fifo_error",
+ "brb_wc3_ll_pa_cnt_error",
+ "brb_wc3_bb_pa_cnt_error",
+ "brb_rc_pkt0_side_fifo_error",
+ "brb_rc_pkt0_req_fifo_error",
+ "brb_rc_pkt0_blk_fifo_error",
+ "brb_rc_pkt0_rls_left_fifo_error",
+ "brb_rc_pkt0_strt_ptr_fifo_error",
+ "brb_rc_pkt0_second_ptr_fifo_error",
+ "brb_rc_pkt0_rsp_fifo_error",
+ "brb_rc_pkt0_dscr_fifo_error",
+ "brb_rc_pkt1_side_fifo_error",
+ "brb_rc_pkt1_req_fifo_error",
+ "brb_rc_pkt1_blk_fifo_error",
+ "brb_rc_pkt1_rls_left_fifo_error",
+ "brb_rc_pkt1_strt_ptr_fifo_error",
+ "brb_rc_pkt1_second_ptr_fifo_error",
+ "brb_rc_pkt1_rsp_fifo_error",
+ "brb_rc_pkt1_dscr_fifo_error",
+ "brb_rc_pkt2_side_fifo_error",
+ "brb_rc_pkt2_req_fifo_error",
+ "brb_rc_pkt2_blk_fifo_error",
+ "brb_rc_pkt2_rls_left_fifo_error",
+ "brb_rc_pkt2_strt_ptr_fifo_error",
+ "brb_rc_pkt2_second_ptr_fifo_error",
+ "brb_rc_pkt2_rsp_fifo_error",
+ "brb_rc_pkt2_dscr_fifo_error",
+ "brb_rc_pkt3_side_fifo_error",
+ "brb_rc_pkt3_req_fifo_error",
+ "brb_rc_pkt3_blk_fifo_error",
+ "brb_rc_pkt3_rls_left_fifo_error",
+ "brb_rc_pkt3_strt_ptr_fifo_error",
+ "brb_rc_pkt3_second_ptr_fifo_error",
+ "brb_rc_pkt3_rsp_fifo_error",
+ "brb_rc_pkt3_dscr_fifo_error",
+ "brb_rc_sop_strt_fifo_error",
+ "brb_rc_sop_req_fifo_error",
+ "brb_rc_sop_dscr_fifo_error",
+ "brb_rc_sop_queue_fifo_error",
+ "brb_rc0_eop_error",
+ "brb_rc1_eop_error",
+ "brb_ll_arb_rls_fifo_error",
+ "brb_ll_arb_prefetch_fifo_error",
+ "brb_rc_pkt0_rls_fifo_error",
+ "brb_rc_pkt1_rls_fifo_error",
+ "brb_rc_pkt2_rls_fifo_error",
+ "brb_rc_pkt3_rls_fifo_error",
+ "brb_rc_pkt4_rls_fifo_error",
+ "brb_rc_pkt4_rls_error",
+ "brb_rc_pkt4_1st_error",
+ "brb_rc_pkt4_len_error",
+ "brb_rc_pkt4_middle_error",
+ "brb_rc_pkt4_protocol_error",
+ "brb_rc_pkt4_side_fifo_error",
+ "brb_rc_pkt4_req_fifo_error",
+ "brb_rc_pkt4_blk_fifo_error",
+ "brb_rc_pkt4_rls_left_fifo_error",
+ "brb_rc_pkt4_strt_ptr_fifo_error",
+ "brb_rc_pkt4_second_ptr_fifo_error",
+ "brb_rc_pkt4_rsp_fifo_error",
+ "brb_rc_pkt4_dscr_fifo_error",
+ "brb_rc_pkt5_rls_error",
+ "brb_packet_available_sync_fifo_push_error",
+ "brb_wc4_protocol_error",
+ "brb_wc5_protocol_error",
+ "brb_wc6_protocol_error",
+ "brb_wc7_protocol_error",
+ "brb_wc4_inp_fifo_error",
+ "brb_wc4_sop_fifo_error",
+ "brb_wc4_queue_fifo_error",
+ "brb_wc4_free_point_fifo_error",
+ "brb_wc4_next_point_fifo_error",
+ "brb_wc4_strt_fifo_error",
+ "brb_wc4_second_dscr_fifo_error",
+ "brb_wc4_pkt_avail_fifo_error",
+ "brb_wc4_cos_cnt_fifo_error",
+ "brb_wc4_notify_fifo_error",
+ "brb_wc4_ll_req_fifo_error",
+ "brb_wc4_ll_pa_cnt_error",
+ "brb_wc4_bb_pa_cnt_error",
+ "brb_wc5_inp_fifo_error",
+ "brb_wc5_sop_fifo_error",
+ "brb_wc5_queue_fifo_error",
+ "brb_wc5_free_point_fifo_error",
+ "brb_wc5_next_point_fifo_error",
+ "brb_wc5_strt_fifo_error",
+ "brb_wc5_second_dscr_fifo_error",
+ "brb_wc5_pkt_avail_fifo_error",
+ "brb_wc5_cos_cnt_fifo_error",
+ "brb_wc5_notify_fifo_error",
+ "brb_wc5_ll_req_fifo_error",
+ "brb_wc5_ll_pa_cnt_error",
+ "brb_wc5_bb_pa_cnt_error",
+ "brb_wc6_inp_fifo_error",
+ "brb_wc6_sop_fifo_error",
+ "brb_wc6_queue_fifo_error",
+ "brb_wc6_free_point_fifo_error",
+ "brb_wc6_next_point_fifo_error",
+ "brb_wc6_strt_fifo_error",
+ "brb_wc6_second_dscr_fifo_error",
+ "brb_wc6_pkt_avail_fifo_error",
+ "brb_wc6_cos_cnt_fifo_error",
+ "brb_wc6_notify_fifo_error",
+ "brb_wc6_ll_req_fifo_error",
+ "brb_wc6_ll_pa_cnt_error",
+ "brb_wc6_bb_pa_cnt_error",
+ "brb_wc7_inp_fifo_error",
+ "brb_wc7_sop_fifo_error",
+ "brb_wc7_queue_fifo_error",
+ "brb_wc7_free_point_fifo_error",
+ "brb_wc7_next_point_fifo_error",
+ "brb_wc7_strt_fifo_error",
+ "brb_wc7_second_dscr_fifo_error",
+ "brb_wc7_pkt_avail_fifo_error",
+ "brb_wc7_cos_cnt_fifo_error",
+ "brb_wc7_notify_fifo_error",
+ "brb_wc7_ll_req_fifo_error",
+ "brb_wc7_ll_pa_cnt_error",
+ "brb_wc7_bb_pa_cnt_error",
+ "brb_wc9_queue_fifo_error",
+ "brb_rc_sop_inp_sync_fifo_push_error",
+ "brb_rc0_inp_sync_fifo_push_error",
+ "brb_rc1_inp_sync_fifo_push_error",
+ "brb_rc2_inp_sync_fifo_push_error",
+ "brb_rc3_inp_sync_fifo_push_error",
+ "brb_rc0_out_sync_fifo_push_error",
+ "brb_rc1_out_sync_fifo_push_error",
+ "brb_rc2_out_sync_fifo_push_error",
+ "brb_rc3_out_sync_fifo_push_error",
+ "brb_rc4_out_sync_fifo_push_error",
+ "brb_unused_2",
+ "brb_rc0_eop_inp_sync_fifo_push_error",
+ "brb_rc1_eop_inp_sync_fifo_push_error",
+ "brb_rc2_eop_inp_sync_fifo_push_error",
+ "brb_rc3_eop_inp_sync_fifo_push_error",
+ "brb_rc0_eop_out_sync_fifo_push_error",
+ "brb_rc1_eop_out_sync_fifo_push_error",
+ "brb_rc2_eop_out_sync_fifo_push_error",
+ "brb_rc3_eop_out_sync_fifo_push_error",
+ "brb_unused_3",
+ "brb_rc2_eop_error",
+ "brb_rc3_eop_error",
+ "brb_mac2_fc_cnt_error",
+ "brb_mac3_fc_cnt_error",
+ "brb_wc4_eop_fifo_error",
+ "brb_wc5_eop_fifo_error",
+ "brb_wc6_eop_fifo_error",
+ "brb_wc7_eop_fifo_error",
+};
+#else
+#define brb_int_attn_desc OSAL_NULL
+#endif
+
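+/* BRB spreads its 237 attention causes across twelve interrupt status
+ * registers per chip; each index table below covers one register's slice
+ * of brb_int_attn_desc[] above.
+ */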
+static const u16 brb_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_bb_a0 = {
+ 0, 32, brb_int0_bb_a0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_bb_a0_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_bb_a0 = {
+ 1, 30, brb_int1_bb_a0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_bb_a0_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_bb_a0 = {
+ 2, 28, brb_int2_bb_a0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_bb_a0_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122,
+};
+
+static struct attn_hw_reg brb_int3_bb_a0 = {
+ 3, 31, brb_int3_bb_a0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_bb_a0_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_bb_a0 = {
+ 4, 27, brb_int4_bb_a0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_bb_a0_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_bb_a0 = {
+ 5, 1, brb_int5_bb_a0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_bb_a0_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_bb_a0 = {
+ 6, 8, brb_int6_bb_a0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_bb_a0_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190,
+};
+
+static struct attn_hw_reg brb_int7_bb_a0 = {
+ 7, 32, brb_int7_bb_a0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_bb_a0_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_bb_a0 = {
+ 8, 17, brb_int8_bb_a0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_bb_a0_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_bb_a0 = {
+ 9, 1, brb_int9_bb_a0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_bb_a0_attn_idx[14] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225,
+};
+
+static struct attn_hw_reg brb_int10_bb_a0 = {
+ 10, 14, brb_int10_bb_a0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc,
+ 0x3401b8
+};
+
+static const u16 brb_int11_bb_a0_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_bb_a0 = {
+ 11, 8, brb_int11_bb_a0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_bb_a0_regs[12] = {
+ &brb_int0_bb_a0, &brb_int1_bb_a0, &brb_int2_bb_a0, &brb_int3_bb_a0,
+ &brb_int4_bb_a0, &brb_int5_bb_a0, &brb_int6_bb_a0, &brb_int7_bb_a0,
+ &brb_int8_bb_a0, &brb_int9_bb_a0,
+ &brb_int10_bb_a0, &brb_int11_bb_a0,
+};
+
+static const u16 brb_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_bb_b0 = {
+ 0, 32, brb_int0_bb_b0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_bb_b0_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_bb_b0 = {
+ 1, 30, brb_int1_bb_b0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_bb_b0_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_bb_b0 = {
+ 2, 28, brb_int2_bb_b0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_bb_b0_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122,
+};
+
+static struct attn_hw_reg brb_int3_bb_b0 = {
+ 3, 31, brb_int3_bb_b0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_bb_b0_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_bb_b0 = {
+ 4, 27, brb_int4_bb_b0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_bb_b0_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_bb_b0 = {
+ 5, 1, brb_int5_bb_b0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_bb_b0_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_bb_b0 = {
+ 6, 8, brb_int6_bb_b0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_bb_b0_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190,
+};
+
+static struct attn_hw_reg brb_int7_bb_b0 = {
+ 7, 32, brb_int7_bb_b0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_bb_b0_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_bb_b0 = {
+ 8, 17, brb_int8_bb_b0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_bb_b0_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_bb_b0 = {
+ 9, 1, brb_int9_bb_b0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_bb_b0_attn_idx[14] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225,
+};
+
+static struct attn_hw_reg brb_int10_bb_b0 = {
+ 10, 14, brb_int10_bb_b0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc,
+ 0x3401b8
+};
+
+static const u16 brb_int11_bb_b0_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_bb_b0 = {
+ 11, 8, brb_int11_bb_b0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
+ &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
+ &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
+ &brb_int8_bb_b0, &brb_int9_bb_b0,
+ &brb_int10_bb_b0, &brb_int11_bb_b0,
+};
+
+static const u16 brb_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_k2 = {
+ 0, 32, brb_int0_k2_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_k2_attn_idx[30] = {
+ 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_k2 = {
+ 1, 30, brb_int1_k2_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_k2_attn_idx[28] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_k2 = {
+ 2, 28, brb_int2_k2_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_k2_attn_idx[31] = {
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122,
+};
+
+static struct attn_hw_reg brb_int3_k2 = {
+ 3, 31, brb_int3_k2_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_k2_attn_idx[27] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_k2 = {
+ 4, 27, brb_int4_k2_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_k2_attn_idx[1] = {
+ 150,
+};
+
+static struct attn_hw_reg brb_int5_k2 = {
+ 5, 1, brb_int5_k2_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_k2_attn_idx[8] = {
+ 151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_k2 = {
+ 6, 8, brb_int6_k2_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_k2_attn_idx[32] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190,
+};
+
+static struct attn_hw_reg brb_int7_k2 = {
+ 7, 32, brb_int7_k2_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_k2_attn_idx[17] = {
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207,
+};
+
+static struct attn_hw_reg brb_int8_k2 = {
+ 8, 17, brb_int8_k2_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_k2_attn_idx[1] = {
+ 208,
+};
+
+static struct attn_hw_reg brb_int9_k2 = {
+ 9, 1, brb_int9_k2_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_k2_attn_idx[18] = {
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 222, 223,
+ 224, 225, 226, 227,
+};
+
+static struct attn_hw_reg brb_int10_k2 = {
+ 10, 18, brb_int10_k2_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8
+};
+
+static const u16 brb_int11_k2_attn_idx[8] = {
+ 229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_k2 = {
+ 11, 8, brb_int11_k2_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_k2_regs[12] = {
+ &brb_int0_k2, &brb_int1_k2, &brb_int2_k2, &brb_int3_k2, &brb_int4_k2,
+ &brb_int5_k2, &brb_int6_k2, &brb_int7_k2, &brb_int8_k2, &brb_int9_k2,
+ &brb_int10_k2, &brb_int11_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *brb_prty_attn_desc[75] = {
+ "brb_ll_bank0_mem_prty",
+ "brb_ll_bank1_mem_prty",
+ "brb_ll_bank2_mem_prty",
+ "brb_ll_bank3_mem_prty",
+ "brb_datapath_registers",
+ "brb_mem001_i_ecc_rf_int",
+ "brb_mem008_i_ecc_rf_int",
+ "brb_mem009_i_ecc_rf_int",
+ "brb_mem010_i_ecc_rf_int",
+ "brb_mem011_i_ecc_rf_int",
+ "brb_mem012_i_ecc_rf_int",
+ "brb_mem013_i_ecc_rf_int",
+ "brb_mem014_i_ecc_rf_int",
+ "brb_mem015_i_ecc_rf_int",
+ "brb_mem016_i_ecc_rf_int",
+ "brb_mem002_i_ecc_rf_int",
+ "brb_mem003_i_ecc_rf_int",
+ "brb_mem004_i_ecc_rf_int",
+ "brb_mem005_i_ecc_rf_int",
+ "brb_mem006_i_ecc_rf_int",
+ "brb_mem007_i_ecc_rf_int",
+ "brb_mem070_i_mem_prty",
+ "brb_mem069_i_mem_prty",
+ "brb_mem053_i_mem_prty",
+ "brb_mem054_i_mem_prty",
+ "brb_mem055_i_mem_prty",
+ "brb_mem056_i_mem_prty",
+ "brb_mem057_i_mem_prty",
+ "brb_mem058_i_mem_prty",
+ "brb_mem059_i_mem_prty",
+ "brb_mem060_i_mem_prty",
+ "brb_mem061_i_mem_prty",
+ "brb_mem062_i_mem_prty",
+ "brb_mem063_i_mem_prty",
+ "brb_mem064_i_mem_prty",
+ "brb_mem065_i_mem_prty",
+ "brb_mem045_i_mem_prty",
+ "brb_mem046_i_mem_prty",
+ "brb_mem047_i_mem_prty",
+ "brb_mem048_i_mem_prty",
+ "brb_mem049_i_mem_prty",
+ "brb_mem050_i_mem_prty",
+ "brb_mem051_i_mem_prty",
+ "brb_mem052_i_mem_prty",
+ "brb_mem041_i_mem_prty",
+ "brb_mem042_i_mem_prty",
+ "brb_mem043_i_mem_prty",
+ "brb_mem044_i_mem_prty",
+ "brb_mem040_i_mem_prty",
+ "brb_mem035_i_mem_prty",
+ "brb_mem066_i_mem_prty",
+ "brb_mem067_i_mem_prty",
+ "brb_mem068_i_mem_prty",
+ "brb_mem030_i_mem_prty",
+ "brb_mem031_i_mem_prty",
+ "brb_mem032_i_mem_prty",
+ "brb_mem033_i_mem_prty",
+ "brb_mem037_i_mem_prty",
+ "brb_mem038_i_mem_prty",
+ "brb_mem034_i_mem_prty",
+ "brb_mem036_i_mem_prty",
+ "brb_mem017_i_mem_prty",
+ "brb_mem018_i_mem_prty",
+ "brb_mem019_i_mem_prty",
+ "brb_mem020_i_mem_prty",
+ "brb_mem021_i_mem_prty",
+ "brb_mem022_i_mem_prty",
+ "brb_mem023_i_mem_prty",
+ "brb_mem024_i_mem_prty",
+ "brb_mem029_i_mem_prty",
+ "brb_mem026_i_mem_prty",
+ "brb_mem027_i_mem_prty",
+ "brb_mem028_i_mem_prty",
+ "brb_mem025_i_mem_prty",
+ "brb_mem039_i_mem_prty",
+};
+#else
+#define brb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 brb_prty1_bb_a0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 49,
+};
+
+static struct attn_hw_reg brb_prty1_bb_a0 = {
+ 0, 31, brb_prty1_bb_a0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_bb_a0_attn_idx[19] = {
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 69, 70, 71, 72, 73, 74,
+ 48,
+};
+
+static struct attn_hw_reg brb_prty2_bb_a0 = {
+ 1, 19, brb_prty2_bb_a0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_bb_a0_regs[2] = {
+ &brb_prty1_bb_a0, &brb_prty2_bb_a0,
+};
+
+static const u16 brb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg brb_prty0_bb_b0 = {
+ 0, 5, brb_prty0_bb_b0_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0
+};
+
+static const u16 brb_prty1_bb_b0_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg brb_prty1_bb_b0 = {
+ 1, 31, brb_prty1_bb_b0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_bb_b0_attn_idx[14] = {
+ 53, 54, 55, 56, 59, 61, 62, 63, 64, 69, 70, 71, 72, 73,
+};
+
+static struct attn_hw_reg brb_prty2_bb_b0 = {
+ 2, 14, brb_prty2_bb_b0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
+ &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0,
+};
+
+static const u16 brb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg brb_prty0_k2 = {
+ 0, 5, brb_prty0_k2_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0
+};
+
+static const u16 brb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg brb_prty1_k2 = {
+ 1, 31, brb_prty1_k2_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_k2_attn_idx[30] = {
+ 50, 51, 52, 36, 37, 38, 39, 40, 41, 42, 43, 47, 53, 54, 55, 56, 57, 58,
+ 59, 49, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+};
+
+static struct attn_hw_reg brb_prty2_k2 = {
+ 2, 30, brb_prty2_k2_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_k2_regs[3] = {
+ &brb_prty0_k2, &brb_prty1_k2, &brb_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *src_int_attn_desc[1] = {
+ "src_address_error",
+};
+#else
+#define src_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 src_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_bb_a0 = {
+ 0, 1, src_int0_bb_a0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_bb_a0_regs[1] = {
+ &src_int0_bb_a0,
+};
+
+static const u16 src_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_bb_b0 = {
+ 0, 1, src_int0_bb_b0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
+ &src_int0_bb_b0,
+};
+
+static const u16 src_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg src_int0_k2 = {
+ 0, 1, src_int0_k2_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_k2_regs[1] = {
+ &src_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prs_int_attn_desc[2] = {
+ "prs_address_error",
+ "prs_lcid_validation_err",
+};
+#else
+#define prs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 prs_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_bb_a0 = {
+ 0, 2, prs_int0_bb_a0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_bb_a0_regs[1] = {
+ &prs_int0_bb_a0,
+};
+
+static const u16 prs_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_bb_b0 = {
+ 0, 2, prs_int0_bb_b0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
+ &prs_int0_bb_b0,
+};
+
+static const u16 prs_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_int0_k2 = {
+ 0, 2, prs_int0_k2_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_k2_regs[1] = {
+ &prs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prs_prty_attn_desc[75] = {
+ "prs_cam_parity",
+ "prs_gft_cam_parity",
+ "prs_mem011_i_ecc_rf_int",
+ "prs_mem012_i_ecc_rf_int",
+ "prs_mem016_i_ecc_rf_int",
+ "prs_mem017_i_ecc_rf_int",
+ "prs_mem021_i_ecc_rf_int",
+ "prs_mem022_i_ecc_rf_int",
+ "prs_mem026_i_ecc_rf_int",
+ "prs_mem027_i_ecc_rf_int",
+ "prs_mem064_i_mem_prty",
+ "prs_mem044_i_mem_prty",
+ "prs_mem043_i_mem_prty",
+ "prs_mem037_i_mem_prty",
+ "prs_mem033_i_mem_prty",
+ "prs_mem034_i_mem_prty",
+ "prs_mem035_i_mem_prty",
+ "prs_mem036_i_mem_prty",
+ "prs_mem029_i_mem_prty",
+ "prs_mem030_i_mem_prty",
+ "prs_mem031_i_mem_prty",
+ "prs_mem032_i_mem_prty",
+ "prs_mem007_i_mem_prty",
+ "prs_mem028_i_mem_prty",
+ "prs_mem039_i_mem_prty",
+ "prs_mem040_i_mem_prty",
+ "prs_mem058_i_mem_prty",
+ "prs_mem059_i_mem_prty",
+ "prs_mem041_i_mem_prty",
+ "prs_mem042_i_mem_prty",
+ "prs_mem060_i_mem_prty",
+ "prs_mem061_i_mem_prty",
+ "prs_mem009_i_mem_prty",
+ "prs_mem009_i_ecc_rf_int",
+ "prs_mem010_i_ecc_rf_int",
+ "prs_mem014_i_ecc_rf_int",
+ "prs_mem015_i_ecc_rf_int",
+ "prs_mem026_i_mem_prty",
+ "prs_mem025_i_mem_prty",
+ "prs_mem021_i_mem_prty",
+ "prs_mem019_i_mem_prty",
+ "prs_mem020_i_mem_prty",
+ "prs_mem017_i_mem_prty",
+ "prs_mem018_i_mem_prty",
+ "prs_mem005_i_mem_prty",
+ "prs_mem016_i_mem_prty",
+ "prs_mem023_i_mem_prty",
+ "prs_mem024_i_mem_prty",
+ "prs_mem008_i_mem_prty",
+ "prs_mem012_i_mem_prty",
+ "prs_mem013_i_mem_prty",
+ "prs_mem006_i_mem_prty",
+ "prs_mem011_i_mem_prty",
+ "prs_mem003_i_mem_prty",
+ "prs_mem004_i_mem_prty",
+ "prs_mem027_i_mem_prty",
+ "prs_mem010_i_mem_prty",
+ "prs_mem014_i_mem_prty",
+ "prs_mem015_i_mem_prty",
+ "prs_mem054_i_mem_prty",
+ "prs_mem055_i_mem_prty",
+ "prs_mem056_i_mem_prty",
+ "prs_mem057_i_mem_prty",
+ "prs_mem046_i_mem_prty",
+ "prs_mem047_i_mem_prty",
+ "prs_mem048_i_mem_prty",
+ "prs_mem049_i_mem_prty",
+ "prs_mem050_i_mem_prty",
+ "prs_mem051_i_mem_prty",
+ "prs_mem052_i_mem_prty",
+ "prs_mem053_i_mem_prty",
+ "prs_mem062_i_mem_prty",
+ "prs_mem045_i_mem_prty",
+ "prs_mem002_i_mem_prty",
+ "prs_mem001_i_mem_prty",
+};
+#else
+#define prs_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 prs_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prs_prty0_bb_a0 = {
+ 0, 1, prs_prty0_bb_a0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_bb_a0_attn_idx[31] = {
+ 13, 14, 15, 16, 18, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+};
+
+static struct attn_hw_reg prs_prty1_bb_a0 = {
+ 1, 31, prs_prty1_bb_a0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_bb_a0_attn_idx[5] = {
+ 73, 74, 20, 17, 19,
+};
+
+static struct attn_hw_reg prs_prty2_bb_a0 = {
+ 2, 5, prs_prty2_bb_a0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_bb_a0_regs[3] = {
+ &prs_prty0_bb_a0, &prs_prty1_bb_a0, &prs_prty2_bb_a0,
+};
+
+static const u16 prs_prty0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_prty0_bb_b0 = {
+ 0, 2, prs_prty0_bb_b0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_bb_b0_attn_idx[31] = {
+ 13, 14, 15, 16, 18, 19, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+};
+
+static struct attn_hw_reg prs_prty1_bb_b0 = {
+ 1, 31, prs_prty1_bb_b0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_bb_b0_attn_idx[5] = {
+ 73, 74, 20, 17, 55,
+};
+
+static struct attn_hw_reg prs_prty2_bb_b0 = {
+ 2, 5, prs_prty2_bb_b0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
+ &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0,
+};
+
+static const u16 prs_prty0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg prs_prty0_k2 = {
+ 0, 2, prs_prty0_k2_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_k2_attn_idx[31] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+};
+
+static struct attn_hw_reg prs_prty1_k2 = {
+ 1, 31, prs_prty1_k2_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_k2_attn_idx[31] = {
+ 56, 57, 58, 40, 41, 47, 38, 48, 50, 43, 46, 59, 60, 61, 62, 53, 54, 44,
+ 51, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+};
+
+static struct attn_hw_reg prs_prty2_k2 = {
+ 2, 31, prs_prty2_k2_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_k2_regs[3] = {
+ &prs_prty0_k2, &prs_prty1_k2, &prs_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsdm_int_attn_desc[28] = {
+ "tsdm_address_error",
+ "tsdm_inp_queue_error",
+ "tsdm_delay_fifo_error",
+ "tsdm_async_host_error",
+ "tsdm_prm_fifo_error",
+ "tsdm_ccfc_load_pend_error",
+ "tsdm_tcfc_load_pend_error",
+ "tsdm_dst_int_ram_wait_error",
+ "tsdm_dst_pas_buf_wait_error",
+ "tsdm_dst_pxp_immed_error",
+ "tsdm_dst_pxp_dst_pend_error",
+ "tsdm_dst_brb_src_pend_error",
+ "tsdm_dst_brb_src_addr_error",
+ "tsdm_rsp_brb_pend_error",
+ "tsdm_rsp_int_ram_pend_error",
+ "tsdm_rsp_brb_rd_data_error",
+ "tsdm_rsp_int_ram_rd_data_error",
+ "tsdm_rsp_pxp_rd_data_error",
+ "tsdm_cm_delay_error",
+ "tsdm_sh_delay_error",
+ "tsdm_cmpl_pend_error",
+ "tsdm_cprm_pend_error",
+ "tsdm_timer_addr_error",
+ "tsdm_timer_pend_error",
+ "tsdm_dorq_dpm_error",
+ "tsdm_dst_pxp_done_error",
+ "tsdm_xcm_rmt_buffer_error",
+ "tsdm_ycm_rmt_buffer_error",
+};
+#else
+#define tsdm_int_attn_desc OSAL_NULL
+#endif
+
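+/* Judging from the index tables below, BB wires up only the first 26 TSDM
+ * interrupt causes while K2 also implements the xcm/ycm remote-buffer
+ * errors (bits 26 and 27); the same split repeats for the other SDM
+ * instances later in this file.
+ */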
+static const u16 tsdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg tsdm_int0_bb_a0 = {
+ 0, 26, tsdm_int0_bb_a0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_bb_a0_regs[1] = {
+ &tsdm_int0_bb_a0,
+};
+
+static const u16 tsdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg tsdm_int0_bb_b0 = {
+ 0, 26, tsdm_int0_bb_b0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
+ &tsdm_int0_bb_b0,
+};
+
+static const u16 tsdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg tsdm_int0_k2 = {
+ 0, 28, tsdm_int0_k2_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_k2_regs[1] = {
+ &tsdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsdm_prty_attn_desc[10] = {
+ "tsdm_mem009_i_mem_prty",
+ "tsdm_mem008_i_mem_prty",
+ "tsdm_mem007_i_mem_prty",
+ "tsdm_mem006_i_mem_prty",
+ "tsdm_mem005_i_mem_prty",
+ "tsdm_mem002_i_mem_prty",
+ "tsdm_mem010_i_mem_prty",
+ "tsdm_mem001_i_mem_prty",
+ "tsdm_mem003_i_mem_prty",
+ "tsdm_mem004_i_mem_prty",
+};
+#else
+#define tsdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_bb_a0 = {
+ 0, 10, tsdm_prty1_bb_a0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208,
+ 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_bb_a0_regs[1] = {
+ &tsdm_prty1_bb_a0,
+};
+
+static const u16 tsdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_bb_b0 = {
+ 0, 10, tsdm_prty1_bb_b0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208,
+ 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
+ &tsdm_prty1_bb_b0,
+};
+
+static const u16 tsdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_k2 = {
+ 0, 10, tsdm_prty1_k2_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_k2_regs[1] = {
+ &tsdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msdm_int_attn_desc[28] = {
+ "msdm_address_error",
+ "msdm_inp_queue_error",
+ "msdm_delay_fifo_error",
+ "msdm_async_host_error",
+ "msdm_prm_fifo_error",
+ "msdm_ccfc_load_pend_error",
+ "msdm_tcfc_load_pend_error",
+ "msdm_dst_int_ram_wait_error",
+ "msdm_dst_pas_buf_wait_error",
+ "msdm_dst_pxp_immed_error",
+ "msdm_dst_pxp_dst_pend_error",
+ "msdm_dst_brb_src_pend_error",
+ "msdm_dst_brb_src_addr_error",
+ "msdm_rsp_brb_pend_error",
+ "msdm_rsp_int_ram_pend_error",
+ "msdm_rsp_brb_rd_data_error",
+ "msdm_rsp_int_ram_rd_data_error",
+ "msdm_rsp_pxp_rd_data_error",
+ "msdm_cm_delay_error",
+ "msdm_sh_delay_error",
+ "msdm_cmpl_pend_error",
+ "msdm_cprm_pend_error",
+ "msdm_timer_addr_error",
+ "msdm_timer_pend_error",
+ "msdm_dorq_dpm_error",
+ "msdm_dst_pxp_done_error",
+ "msdm_xcm_rmt_buffer_error",
+ "msdm_ycm_rmt_buffer_error",
+};
+#else
+#define msdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 msdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg msdm_int0_bb_a0 = {
+ 0, 26, msdm_int0_bb_a0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_bb_a0_regs[1] = {
+ &msdm_int0_bb_a0,
+};
+
+static const u16 msdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg msdm_int0_bb_b0 = {
+ 0, 26, msdm_int0_bb_b0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
+ &msdm_int0_bb_b0,
+};
+
+static const u16 msdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg msdm_int0_k2 = {
+ 0, 28, msdm_int0_k2_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_k2_regs[1] = {
+ &msdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msdm_prty_attn_desc[11] = {
+ "msdm_mem009_i_mem_prty",
+ "msdm_mem008_i_mem_prty",
+ "msdm_mem007_i_mem_prty",
+ "msdm_mem006_i_mem_prty",
+ "msdm_mem005_i_mem_prty",
+ "msdm_mem002_i_mem_prty",
+ "msdm_mem011_i_mem_prty",
+ "msdm_mem001_i_mem_prty",
+ "msdm_mem003_i_mem_prty",
+ "msdm_mem004_i_mem_prty",
+ "msdm_mem010_i_mem_prty",
+};
+#else
+#define msdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 msdm_prty1_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_bb_a0 = {
+ 0, 11, msdm_prty1_bb_a0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208,
+ 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_bb_a0_regs[1] = {
+ &msdm_prty1_bb_a0,
+};
+
+static const u16 msdm_prty1_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_bb_b0 = {
+ 0, 11, msdm_prty1_bb_b0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208,
+ 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
+ &msdm_prty1_bb_b0,
+};
+
+static const u16 msdm_prty1_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_k2 = {
+ 0, 11, msdm_prty1_k2_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_k2_regs[1] = {
+ &msdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usdm_int_attn_desc[28] = {
+ "usdm_address_error",
+ "usdm_inp_queue_error",
+ "usdm_delay_fifo_error",
+ "usdm_async_host_error",
+ "usdm_prm_fifo_error",
+ "usdm_ccfc_load_pend_error",
+ "usdm_tcfc_load_pend_error",
+ "usdm_dst_int_ram_wait_error",
+ "usdm_dst_pas_buf_wait_error",
+ "usdm_dst_pxp_immed_error",
+ "usdm_dst_pxp_dst_pend_error",
+ "usdm_dst_brb_src_pend_error",
+ "usdm_dst_brb_src_addr_error",
+ "usdm_rsp_brb_pend_error",
+ "usdm_rsp_int_ram_pend_error",
+ "usdm_rsp_brb_rd_data_error",
+ "usdm_rsp_int_ram_rd_data_error",
+ "usdm_rsp_pxp_rd_data_error",
+ "usdm_cm_delay_error",
+ "usdm_sh_delay_error",
+ "usdm_cmpl_pend_error",
+ "usdm_cprm_pend_error",
+ "usdm_timer_addr_error",
+ "usdm_timer_pend_error",
+ "usdm_dorq_dpm_error",
+ "usdm_dst_pxp_done_error",
+ "usdm_xcm_rmt_buffer_error",
+ "usdm_ycm_rmt_buffer_error",
+};
+#else
+#define usdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 usdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg usdm_int0_bb_a0 = {
+ 0, 26, usdm_int0_bb_a0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_bb_a0_regs[1] = {
+ &usdm_int0_bb_a0,
+};
+
+static const u16 usdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg usdm_int0_bb_b0 = {
+ 0, 26, usdm_int0_bb_b0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
+ &usdm_int0_bb_b0,
+};
+
+static const u16 usdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg usdm_int0_k2 = {
+ 0, 28, usdm_int0_k2_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_k2_regs[1] = {
+ &usdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usdm_prty_attn_desc[10] = {
+ "usdm_mem008_i_mem_prty",
+ "usdm_mem007_i_mem_prty",
+ "usdm_mem006_i_mem_prty",
+ "usdm_mem005_i_mem_prty",
+ "usdm_mem002_i_mem_prty",
+ "usdm_mem010_i_mem_prty",
+ "usdm_mem001_i_mem_prty",
+ "usdm_mem003_i_mem_prty",
+ "usdm_mem004_i_mem_prty",
+ "usdm_mem009_i_mem_prty",
+};
+#else
+#define usdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 usdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_bb_a0 = {
+ 0, 10, usdm_prty1_bb_a0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208,
+ 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_bb_a0_regs[1] = {
+ &usdm_prty1_bb_a0,
+};
+
+static const u16 usdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_bb_b0 = {
+ 0, 10, usdm_prty1_bb_b0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208,
+ 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
+ &usdm_prty1_bb_b0,
+};
+
+static const u16 usdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_k2 = {
+ 0, 10, usdm_prty1_k2_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_k2_regs[1] = {
+ &usdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsdm_int_attn_desc[28] = {
+ "xsdm_address_error",
+ "xsdm_inp_queue_error",
+ "xsdm_delay_fifo_error",
+ "xsdm_async_host_error",
+ "xsdm_prm_fifo_error",
+ "xsdm_ccfc_load_pend_error",
+ "xsdm_tcfc_load_pend_error",
+ "xsdm_dst_int_ram_wait_error",
+ "xsdm_dst_pas_buf_wait_error",
+ "xsdm_dst_pxp_immed_error",
+ "xsdm_dst_pxp_dst_pend_error",
+ "xsdm_dst_brb_src_pend_error",
+ "xsdm_dst_brb_src_addr_error",
+ "xsdm_rsp_brb_pend_error",
+ "xsdm_rsp_int_ram_pend_error",
+ "xsdm_rsp_brb_rd_data_error",
+ "xsdm_rsp_int_ram_rd_data_error",
+ "xsdm_rsp_pxp_rd_data_error",
+ "xsdm_cm_delay_error",
+ "xsdm_sh_delay_error",
+ "xsdm_cmpl_pend_error",
+ "xsdm_cprm_pend_error",
+ "xsdm_timer_addr_error",
+ "xsdm_timer_pend_error",
+ "xsdm_dorq_dpm_error",
+ "xsdm_dst_pxp_done_error",
+ "xsdm_xcm_rmt_buffer_error",
+ "xsdm_ycm_rmt_buffer_error",
+};
+#else
+#define xsdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg xsdm_int0_bb_a0 = {
+ 0, 26, xsdm_int0_bb_a0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_bb_a0_regs[1] = {
+ &xsdm_int0_bb_a0,
+};
+
+static const u16 xsdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg xsdm_int0_bb_b0 = {
+ 0, 26, xsdm_int0_bb_b0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
+ &xsdm_int0_bb_b0,
+};
+
+static const u16 xsdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg xsdm_int0_k2 = {
+ 0, 28, xsdm_int0_k2_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_k2_regs[1] = {
+ &xsdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsdm_prty_attn_desc[10] = {
+ "xsdm_mem009_i_mem_prty",
+ "xsdm_mem008_i_mem_prty",
+ "xsdm_mem007_i_mem_prty",
+ "xsdm_mem006_i_mem_prty",
+ "xsdm_mem003_i_mem_prty",
+ "xsdm_mem010_i_mem_prty",
+ "xsdm_mem002_i_mem_prty",
+ "xsdm_mem004_i_mem_prty",
+ "xsdm_mem005_i_mem_prty",
+ "xsdm_mem001_i_mem_prty",
+};
+#else
+#define xsdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsdm_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_bb_a0 = {
+ 0, 10, xsdm_prty1_bb_a0_attn_idx, 0xf80200, 0xf8020c, 0xf80208,
+ 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_bb_a0_regs[1] = {
+ &xsdm_prty1_bb_a0,
+};
+
+static const u16 xsdm_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_bb_b0 = {
+ 0, 10, xsdm_prty1_bb_b0_attn_idx, 0xf80200, 0xf8020c, 0xf80208,
+ 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
+ &xsdm_prty1_bb_b0,
+};
+
+static const u16 xsdm_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_k2 = {
+ 0, 10, xsdm_prty1_k2_attn_idx, 0xf80200, 0xf8020c, 0xf80208, 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_k2_regs[1] = {
+ &xsdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysdm_int_attn_desc[28] = {
+ "ysdm_address_error",
+ "ysdm_inp_queue_error",
+ "ysdm_delay_fifo_error",
+ "ysdm_async_host_error",
+ "ysdm_prm_fifo_error",
+ "ysdm_ccfc_load_pend_error",
+ "ysdm_tcfc_load_pend_error",
+ "ysdm_dst_int_ram_wait_error",
+ "ysdm_dst_pas_buf_wait_error",
+ "ysdm_dst_pxp_immed_error",
+ "ysdm_dst_pxp_dst_pend_error",
+ "ysdm_dst_brb_src_pend_error",
+ "ysdm_dst_brb_src_addr_error",
+ "ysdm_rsp_brb_pend_error",
+ "ysdm_rsp_int_ram_pend_error",
+ "ysdm_rsp_brb_rd_data_error",
+ "ysdm_rsp_int_ram_rd_data_error",
+ "ysdm_rsp_pxp_rd_data_error",
+ "ysdm_cm_delay_error",
+ "ysdm_sh_delay_error",
+ "ysdm_cmpl_pend_error",
+ "ysdm_cprm_pend_error",
+ "ysdm_timer_addr_error",
+ "ysdm_timer_pend_error",
+ "ysdm_dorq_dpm_error",
+ "ysdm_dst_pxp_done_error",
+ "ysdm_xcm_rmt_buffer_error",
+ "ysdm_ycm_rmt_buffer_error",
+};
+#else
+#define ysdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg ysdm_int0_bb_a0 = {
+ 0, 26, ysdm_int0_bb_a0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_bb_a0_regs[1] = {
+ &ysdm_int0_bb_a0,
+};
+
+static const u16 ysdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg ysdm_int0_bb_b0 = {
+ 0, 26, ysdm_int0_bb_b0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
+ &ysdm_int0_bb_b0,
+};
+
+static const u16 ysdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg ysdm_int0_k2 = {
+ 0, 28, ysdm_int0_k2_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_k2_regs[1] = {
+ &ysdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysdm_prty_attn_desc[9] = {
+ "ysdm_mem008_i_mem_prty",
+ "ysdm_mem007_i_mem_prty",
+ "ysdm_mem006_i_mem_prty",
+ "ysdm_mem005_i_mem_prty",
+ "ysdm_mem002_i_mem_prty",
+ "ysdm_mem009_i_mem_prty",
+ "ysdm_mem001_i_mem_prty",
+ "ysdm_mem003_i_mem_prty",
+ "ysdm_mem004_i_mem_prty",
+};
+#else
+#define ysdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysdm_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_bb_a0 = {
+ 0, 9, ysdm_prty1_bb_a0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_bb_a0_regs[1] = {
+ &ysdm_prty1_bb_a0,
+};
+
+static const u16 ysdm_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_bb_b0 = {
+ 0, 9, ysdm_prty1_bb_b0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
+ &ysdm_prty1_bb_b0,
+};
+
+static const u16 ysdm_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_k2 = {
+ 0, 9, ysdm_prty1_k2_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_k2_regs[1] = {
+ &ysdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psdm_int_attn_desc[28] = {
+ "psdm_address_error",
+ "psdm_inp_queue_error",
+ "psdm_delay_fifo_error",
+ "psdm_async_host_error",
+ "psdm_prm_fifo_error",
+ "psdm_ccfc_load_pend_error",
+ "psdm_tcfc_load_pend_error",
+ "psdm_dst_int_ram_wait_error",
+ "psdm_dst_pas_buf_wait_error",
+ "psdm_dst_pxp_immed_error",
+ "psdm_dst_pxp_dst_pend_error",
+ "psdm_dst_brb_src_pend_error",
+ "psdm_dst_brb_src_addr_error",
+ "psdm_rsp_brb_pend_error",
+ "psdm_rsp_int_ram_pend_error",
+ "psdm_rsp_brb_rd_data_error",
+ "psdm_rsp_int_ram_rd_data_error",
+ "psdm_rsp_pxp_rd_data_error",
+ "psdm_cm_delay_error",
+ "psdm_sh_delay_error",
+ "psdm_cmpl_pend_error",
+ "psdm_cprm_pend_error",
+ "psdm_timer_addr_error",
+ "psdm_timer_pend_error",
+ "psdm_dorq_dpm_error",
+ "psdm_dst_pxp_done_error",
+ "psdm_xcm_rmt_buffer_error",
+ "psdm_ycm_rmt_buffer_error",
+};
+#else
+#define psdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 psdm_int0_bb_a0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg psdm_int0_bb_a0 = {
+ 0, 26, psdm_int0_bb_a0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_bb_a0_regs[1] = {
+ &psdm_int0_bb_a0,
+};
+
+static const u16 psdm_int0_bb_b0_attn_idx[26] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg psdm_int0_bb_b0 = {
+ 0, 26, psdm_int0_bb_b0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
+ &psdm_int0_bb_b0,
+};
+
+static const u16 psdm_int0_k2_attn_idx[28] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg psdm_int0_k2 = {
+ 0, 28, psdm_int0_k2_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_k2_regs[1] = {
+ &psdm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psdm_prty_attn_desc[9] = {
+ "psdm_mem008_i_mem_prty",
+ "psdm_mem007_i_mem_prty",
+ "psdm_mem006_i_mem_prty",
+ "psdm_mem005_i_mem_prty",
+ "psdm_mem002_i_mem_prty",
+ "psdm_mem009_i_mem_prty",
+ "psdm_mem001_i_mem_prty",
+ "psdm_mem003_i_mem_prty",
+ "psdm_mem004_i_mem_prty",
+};
+#else
+#define psdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 psdm_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_bb_a0 = {
+ 0, 9, psdm_prty1_bb_a0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_bb_a0_regs[1] = {
+ &psdm_prty1_bb_a0,
+};
+
+static const u16 psdm_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_bb_b0 = {
+ 0, 9, psdm_prty1_bb_b0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
+ &psdm_prty1_bb_b0,
+};
+
+static const u16 psdm_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_k2 = {
+ 0, 9, psdm_prty1_k2_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_k2_regs[1] = {
+ &psdm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsem_int_attn_desc[46] = {
+ "tsem_address_error",
+ "tsem_fic_last_error",
+ "tsem_fic_length_error",
+ "tsem_fic_fifo_error",
+ "tsem_pas_buf_fifo_error",
+ "tsem_sync_fin_pop_error",
+ "tsem_sync_dra_wr_push_error",
+ "tsem_sync_dra_wr_pop_error",
+ "tsem_sync_dra_rd_push_error",
+ "tsem_sync_dra_rd_pop_error",
+ "tsem_sync_fin_push_error",
+ "tsem_sem_fast_address_error",
+ "tsem_cam_lsb_inp_fifo",
+ "tsem_cam_msb_inp_fifo",
+ "tsem_cam_out_fifo",
+ "tsem_fin_fifo",
+ "tsem_thread_fifo_error",
+ "tsem_thread_overrun",
+ "tsem_sync_ext_store_push_error",
+ "tsem_sync_ext_store_pop_error",
+ "tsem_sync_ext_load_push_error",
+ "tsem_sync_ext_load_pop_error",
+ "tsem_sync_ram_rd_push_error",
+ "tsem_sync_ram_rd_pop_error",
+ "tsem_sync_ram_wr_pop_error",
+ "tsem_sync_ram_wr_push_error",
+ "tsem_sync_dbg_push_error",
+ "tsem_sync_dbg_pop_error",
+ "tsem_dbg_fifo_error",
+ "tsem_cam_msb2_inp_fifo",
+ "tsem_vfc_interrupt",
+ "tsem_vfc_out_fifo_error",
+ "tsem_storm_stack_uf_attn",
+ "tsem_storm_stack_of_attn",
+ "tsem_storm_runtime_error",
+ "tsem_ext_load_pend_wr_error",
+ "tsem_thread_rls_orun_error",
+ "tsem_thread_rls_aloc_error",
+ "tsem_thread_rls_vld_error",
+ "tsem_ext_thread_oor_error",
+ "tsem_ord_id_fifo_error",
+ "tsem_invld_foc_error",
+ "tsem_ext_ld_len_error",
+ "tsem_thrd_ord_fifo_error",
+ "tsem_invld_thrd_ord_error",
+ "tsem_fast_memory_address_error",
+};
+#else
+#define tsem_int_attn_desc OSAL_NULL
+#endif
+
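+/* The storm SEM blocks add a fast-memory interrupt register of their own;
+ * its single cause (index 45, the *_fast_memory_address_error entry) sits
+ * at a separate register base from the main SEM attention registers.
+ */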
+static const u16 tsem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_bb_a0 = {
+ 0, 32, tsem_int0_bb_a0_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_bb_a0 = {
+ 1, 13, tsem_int1_bb_a0_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_a0 = {
+ 2, 1, tsem_fast_memory_int0_bb_a0_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_bb_a0_regs[3] = {
+ &tsem_int0_bb_a0, &tsem_int1_bb_a0, &tsem_fast_memory_int0_bb_a0,
+};
+
+static const u16 tsem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_bb_b0 = {
+ 0, 32, tsem_int0_bb_b0_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_bb_b0 = {
+ 1, 13, tsem_int1_bb_b0_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
+ 2, 1, tsem_fast_memory_int0_bb_b0_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
+ &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0,
+};
+
+static const u16 tsem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_k2 = {
+ 0, 32, tsem_int0_k2_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+ 0x1700044
+};
+
+static const u16 tsem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_k2 = {
+ 1, 13, tsem_int1_k2_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+ 0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_k2 = {
+ 2, 1, tsem_fast_memory_int0_k2_attn_idx, 0x1740040, 0x174004c,
+ 0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_k2_regs[3] = {
+ &tsem_int0_k2, &tsem_int1_k2, &tsem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tsem_prty_attn_desc[23] = {
+ "tsem_vfc_rbc_parity_error",
+ "tsem_storm_rf_parity_error",
+ "tsem_reg_gen_parity_error",
+ "tsem_mem005_i_ecc_0_rf_int",
+ "tsem_mem005_i_ecc_1_rf_int",
+ "tsem_mem004_i_mem_prty",
+ "tsem_mem002_i_mem_prty",
+ "tsem_mem003_i_mem_prty",
+ "tsem_mem001_i_mem_prty",
+ "tsem_fast_memory_mem024_i_mem_prty",
+ "tsem_fast_memory_mem023_i_mem_prty",
+ "tsem_fast_memory_mem022_i_mem_prty",
+ "tsem_fast_memory_mem021_i_mem_prty",
+ "tsem_fast_memory_mem020_i_mem_prty",
+ "tsem_fast_memory_mem019_i_mem_prty",
+ "tsem_fast_memory_mem018_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "tsem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "tsem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "tsem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define tsem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_bb_a0 = {
+ 0, 3, tsem_prty0_bb_a0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_bb_a0 = {
+ 1, 6, tsem_prty1_bb_a0_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_a0 = {
+ 2, 6, tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x174a200,
+ 0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_bb_a0_regs[3] = {
+ &tsem_prty0_bb_a0, &tsem_prty1_bb_a0,
+ &tsem_fast_memory_vfc_config_prty1_bb_a0,
+};
+
+static const u16 tsem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_bb_b0 = {
+ 0, 3, tsem_prty0_bb_b0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_bb_b0 = {
+ 1, 6, tsem_prty1_bb_b0_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x174a200,
+ 0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
+ &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
+ &tsem_fast_memory_vfc_config_prty1_bb_b0,
+};
+
+static const u16 tsem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_k2 = {
+ 0, 3, tsem_prty0_k2_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+ 0x17000cc
+};
+
+static const u16 tsem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_k2 = {
+ 1, 6, tsem_prty1_k2_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+ 0x1700204
+};
+
+static const u16 tsem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg tsem_fast_memory_prty1_k2 = {
+ 2, 7, tsem_fast_memory_prty1_k2_attn_idx, 0x1740200, 0x174020c,
+ 0x1740208, 0x1740204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = {
+ 16, 17, 18, 19, 20, 21,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_k2 = {
+ 3, 6, tsem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x174a200,
+ 0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_k2_regs[4] = {
+ &tsem_prty0_k2, &tsem_prty1_k2, &tsem_fast_memory_prty1_k2,
+ &tsem_fast_memory_vfc_config_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msem_int_attn_desc[46] = {
+ "msem_address_error",
+ "msem_fic_last_error",
+ "msem_fic_length_error",
+ "msem_fic_fifo_error",
+ "msem_pas_buf_fifo_error",
+ "msem_sync_fin_pop_error",
+ "msem_sync_dra_wr_push_error",
+ "msem_sync_dra_wr_pop_error",
+ "msem_sync_dra_rd_push_error",
+ "msem_sync_dra_rd_pop_error",
+ "msem_sync_fin_push_error",
+ "msem_sem_fast_address_error",
+ "msem_cam_lsb_inp_fifo",
+ "msem_cam_msb_inp_fifo",
+ "msem_cam_out_fifo",
+ "msem_fin_fifo",
+ "msem_thread_fifo_error",
+ "msem_thread_overrun",
+ "msem_sync_ext_store_push_error",
+ "msem_sync_ext_store_pop_error",
+ "msem_sync_ext_load_push_error",
+ "msem_sync_ext_load_pop_error",
+ "msem_sync_ram_rd_push_error",
+ "msem_sync_ram_rd_pop_error",
+ "msem_sync_ram_wr_pop_error",
+ "msem_sync_ram_wr_push_error",
+ "msem_sync_dbg_push_error",
+ "msem_sync_dbg_pop_error",
+ "msem_dbg_fifo_error",
+ "msem_cam_msb2_inp_fifo",
+ "msem_vfc_interrupt",
+ "msem_vfc_out_fifo_error",
+ "msem_storm_stack_uf_attn",
+ "msem_storm_stack_of_attn",
+ "msem_storm_runtime_error",
+ "msem_ext_load_pend_wr_error",
+ "msem_thread_rls_orun_error",
+ "msem_thread_rls_aloc_error",
+ "msem_thread_rls_vld_error",
+ "msem_ext_thread_oor_error",
+ "msem_ord_id_fifo_error",
+ "msem_invld_foc_error",
+ "msem_ext_ld_len_error",
+ "msem_thrd_ord_fifo_error",
+ "msem_invld_thrd_ord_error",
+ "msem_fast_memory_address_error",
+};
+#else
+#define msem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 msem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_bb_a0 = {
+ 0, 32, msem_int0_bb_a0_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_bb_a0 = {
+ 1, 13, msem_int1_bb_a0_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_a0 = {
+ 2, 1, msem_fast_memory_int0_bb_a0_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_bb_a0_regs[3] = {
+ &msem_int0_bb_a0, &msem_int1_bb_a0, &msem_fast_memory_int0_bb_a0,
+};
+
+static const u16 msem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_bb_b0 = {
+ 0, 32, msem_int0_bb_b0_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_bb_b0 = {
+ 1, 13, msem_int1_bb_b0_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
+ 2, 1, msem_fast_memory_int0_bb_b0_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
+ &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0,
+};
+
+static const u16 msem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_k2 = {
+ 0, 32, msem_int0_k2_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+ 0x1800044
+};
+
+static const u16 msem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_k2 = {
+ 1, 13, msem_int1_k2_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+ 0x1800054
+};
+
+static const u16 msem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_k2 = {
+ 2, 1, msem_fast_memory_int0_k2_attn_idx, 0x1840040, 0x184004c,
+ 0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_k2_regs[3] = {
+ &msem_int0_k2, &msem_int1_k2, &msem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *msem_prty_attn_desc[23] = {
+ "msem_vfc_rbc_parity_error",
+ "msem_storm_rf_parity_error",
+ "msem_reg_gen_parity_error",
+ "msem_mem005_i_ecc_0_rf_int",
+ "msem_mem005_i_ecc_1_rf_int",
+ "msem_mem004_i_mem_prty",
+ "msem_mem002_i_mem_prty",
+ "msem_mem003_i_mem_prty",
+ "msem_mem001_i_mem_prty",
+ "msem_fast_memory_mem024_i_mem_prty",
+ "msem_fast_memory_mem023_i_mem_prty",
+ "msem_fast_memory_mem022_i_mem_prty",
+ "msem_fast_memory_mem021_i_mem_prty",
+ "msem_fast_memory_mem020_i_mem_prty",
+ "msem_fast_memory_mem019_i_mem_prty",
+ "msem_fast_memory_mem018_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "msem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "msem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "msem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define msem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 msem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_bb_a0 = {
+ 0, 3, msem_prty0_bb_a0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_bb_a0 = {
+ 1, 6, msem_prty1_bb_a0_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static struct attn_hw_reg *msem_prty_bb_a0_regs[2] = {
+ &msem_prty0_bb_a0, &msem_prty1_bb_a0,
+};
+
+static const u16 msem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_bb_b0 = {
+ 0, 3, msem_prty0_bb_b0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_bb_b0 = {
+ 1, 6, msem_prty1_bb_b0_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
+ &msem_prty0_bb_b0, &msem_prty1_bb_b0,
+};
+
+static const u16 msem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_k2 = {
+ 0, 3, msem_prty0_k2_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+ 0x18000cc
+};
+
+static const u16 msem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_k2 = {
+ 1, 6, msem_prty1_k2_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+ 0x1800204
+};
+
+static const u16 msem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg msem_fast_memory_prty1_k2 = {
+ 2, 7, msem_fast_memory_prty1_k2_attn_idx, 0x1840200, 0x184020c,
+ 0x1840208, 0x1840204
+};
+
+static struct attn_hw_reg *msem_prty_k2_regs[3] = {
+ &msem_prty0_k2, &msem_prty1_k2, &msem_fast_memory_prty1_k2,
+};
+
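+/* Worked example of the attn_idx indirection: bit 2 of
+ * msem_fast_memory_prty1_k2 carries attn_idx 11, so when ATTN_DESC is
+ * defined it reports as msem_prty_attn_desc[11], i.e.
+ * "msem_fast_memory_mem022_i_mem_prty".
+ */
+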
+#ifdef ATTN_DESC
+static const char *usem_int_attn_desc[46] = {
+ "usem_address_error",
+ "usem_fic_last_error",
+ "usem_fic_length_error",
+ "usem_fic_fifo_error",
+ "usem_pas_buf_fifo_error",
+ "usem_sync_fin_pop_error",
+ "usem_sync_dra_wr_push_error",
+ "usem_sync_dra_wr_pop_error",
+ "usem_sync_dra_rd_push_error",
+ "usem_sync_dra_rd_pop_error",
+ "usem_sync_fin_push_error",
+ "usem_sem_fast_address_error",
+ "usem_cam_lsb_inp_fifo",
+ "usem_cam_msb_inp_fifo",
+ "usem_cam_out_fifo",
+ "usem_fin_fifo",
+ "usem_thread_fifo_error",
+ "usem_thread_overrun",
+ "usem_sync_ext_store_push_error",
+ "usem_sync_ext_store_pop_error",
+ "usem_sync_ext_load_push_error",
+ "usem_sync_ext_load_pop_error",
+ "usem_sync_ram_rd_push_error",
+ "usem_sync_ram_rd_pop_error",
+ "usem_sync_ram_wr_pop_error",
+ "usem_sync_ram_wr_push_error",
+ "usem_sync_dbg_push_error",
+ "usem_sync_dbg_pop_error",
+ "usem_dbg_fifo_error",
+ "usem_cam_msb2_inp_fifo",
+ "usem_vfc_interrupt",
+ "usem_vfc_out_fifo_error",
+ "usem_storm_stack_uf_attn",
+ "usem_storm_stack_of_attn",
+ "usem_storm_runtime_error",
+ "usem_ext_load_pend_wr_error",
+ "usem_thread_rls_orun_error",
+ "usem_thread_rls_aloc_error",
+ "usem_thread_rls_vld_error",
+ "usem_ext_thread_oor_error",
+ "usem_ord_id_fifo_error",
+ "usem_invld_foc_error",
+ "usem_ext_ld_len_error",
+ "usem_thrd_ord_fifo_error",
+ "usem_invld_thrd_ord_error",
+ "usem_fast_memory_address_error",
+};
+#else
+#define usem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 usem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_bb_a0 = {
+ 0, 32, usem_int0_bb_a0_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_bb_a0 = {
+ 1, 13, usem_int1_bb_a0_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_a0 = {
+ 2, 1, usem_fast_memory_int0_bb_a0_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_bb_a0_regs[3] = {
+ &usem_int0_bb_a0, &usem_int1_bb_a0, &usem_fast_memory_int0_bb_a0,
+};
+
+static const u16 usem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_bb_b0 = {
+ 0, 32, usem_int0_bb_b0_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_bb_b0 = {
+ 1, 13, usem_int1_bb_b0_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
+ 2, 1, usem_fast_memory_int0_bb_b0_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
+ &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0,
+};
+
+static const u16 usem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_k2 = {
+ 0, 32, usem_int0_k2_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+ 0x1900044
+};
+
+static const u16 usem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_k2 = {
+ 1, 13, usem_int1_k2_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+ 0x1900054
+};
+
+static const u16 usem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_k2 = {
+ 2, 1, usem_fast_memory_int0_k2_attn_idx, 0x1940040, 0x194004c,
+ 0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_k2_regs[3] = {
+ &usem_int0_k2, &usem_int1_k2, &usem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *usem_prty_attn_desc[23] = {
+ "usem_vfc_rbc_parity_error",
+ "usem_storm_rf_parity_error",
+ "usem_reg_gen_parity_error",
+ "usem_mem005_i_ecc_0_rf_int",
+ "usem_mem005_i_ecc_1_rf_int",
+ "usem_mem004_i_mem_prty",
+ "usem_mem002_i_mem_prty",
+ "usem_mem003_i_mem_prty",
+ "usem_mem001_i_mem_prty",
+ "usem_fast_memory_mem024_i_mem_prty",
+ "usem_fast_memory_mem023_i_mem_prty",
+ "usem_fast_memory_mem022_i_mem_prty",
+ "usem_fast_memory_mem021_i_mem_prty",
+ "usem_fast_memory_mem020_i_mem_prty",
+ "usem_fast_memory_mem019_i_mem_prty",
+ "usem_fast_memory_mem018_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "usem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "usem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "usem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define usem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 usem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_bb_a0 = {
+ 0, 3, usem_prty0_bb_a0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_bb_a0 = {
+ 1, 6, usem_prty1_bb_a0_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static struct attn_hw_reg *usem_prty_bb_a0_regs[2] = {
+ &usem_prty0_bb_a0, &usem_prty1_bb_a0,
+};
+
+static const u16 usem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_bb_b0 = {
+ 0, 3, usem_prty0_bb_b0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_bb_b0 = {
+ 1, 6, usem_prty1_bb_b0_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
+ &usem_prty0_bb_b0, &usem_prty1_bb_b0,
+};
+
+static const u16 usem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_k2 = {
+ 0, 3, usem_prty0_k2_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+ 0x19000cc
+};
+
+static const u16 usem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_k2 = {
+ 1, 6, usem_prty1_k2_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+ 0x1900204
+};
+
+static const u16 usem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg usem_fast_memory_prty1_k2 = {
+ 2, 7, usem_fast_memory_prty1_k2_attn_idx, 0x1940200, 0x194020c,
+ 0x1940208, 0x1940204
+};
+
+static struct attn_hw_reg *usem_prty_k2_regs[3] = {
+ &usem_prty0_k2, &usem_prty1_k2, &usem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsem_int_attn_desc[46] = {
+ "xsem_address_error",
+ "xsem_fic_last_error",
+ "xsem_fic_length_error",
+ "xsem_fic_fifo_error",
+ "xsem_pas_buf_fifo_error",
+ "xsem_sync_fin_pop_error",
+ "xsem_sync_dra_wr_push_error",
+ "xsem_sync_dra_wr_pop_error",
+ "xsem_sync_dra_rd_push_error",
+ "xsem_sync_dra_rd_pop_error",
+ "xsem_sync_fin_push_error",
+ "xsem_sem_fast_address_error",
+ "xsem_cam_lsb_inp_fifo",
+ "xsem_cam_msb_inp_fifo",
+ "xsem_cam_out_fifo",
+ "xsem_fin_fifo",
+ "xsem_thread_fifo_error",
+ "xsem_thread_overrun",
+ "xsem_sync_ext_store_push_error",
+ "xsem_sync_ext_store_pop_error",
+ "xsem_sync_ext_load_push_error",
+ "xsem_sync_ext_load_pop_error",
+ "xsem_sync_ram_rd_push_error",
+ "xsem_sync_ram_rd_pop_error",
+ "xsem_sync_ram_wr_pop_error",
+ "xsem_sync_ram_wr_push_error",
+ "xsem_sync_dbg_push_error",
+ "xsem_sync_dbg_pop_error",
+ "xsem_dbg_fifo_error",
+ "xsem_cam_msb2_inp_fifo",
+ "xsem_vfc_interrupt",
+ "xsem_vfc_out_fifo_error",
+ "xsem_storm_stack_uf_attn",
+ "xsem_storm_stack_of_attn",
+ "xsem_storm_runtime_error",
+ "xsem_ext_load_pend_wr_error",
+ "xsem_thread_rls_orun_error",
+ "xsem_thread_rls_aloc_error",
+ "xsem_thread_rls_vld_error",
+ "xsem_ext_thread_oor_error",
+ "xsem_ord_id_fifo_error",
+ "xsem_invld_foc_error",
+ "xsem_ext_ld_len_error",
+ "xsem_thrd_ord_fifo_error",
+ "xsem_invld_thrd_ord_error",
+ "xsem_fast_memory_address_error",
+};
+#else
+#define xsem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_bb_a0 = {
+ 0, 32, xsem_int0_bb_a0_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_bb_a0 = {
+ 1, 13, xsem_int1_bb_a0_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_a0 = {
+ 2, 1, xsem_fast_memory_int0_bb_a0_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_bb_a0_regs[3] = {
+ &xsem_int0_bb_a0, &xsem_int1_bb_a0, &xsem_fast_memory_int0_bb_a0,
+};
+
+static const u16 xsem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_bb_b0 = {
+ 0, 32, xsem_int0_bb_b0_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_bb_b0 = {
+ 1, 13, xsem_int1_bb_b0_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
+ 2, 1, xsem_fast_memory_int0_bb_b0_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
+ &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0,
+};
+
+static const u16 xsem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_k2 = {
+ 0, 32, xsem_int0_k2_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+ 0x1400044
+};
+
+static const u16 xsem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_k2 = {
+ 1, 13, xsem_int1_k2_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+ 0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_k2 = {
+ 2, 1, xsem_fast_memory_int0_k2_attn_idx, 0x1440040, 0x144004c,
+ 0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_k2_regs[3] = {
+ &xsem_int0_k2, &xsem_int1_k2, &xsem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xsem_prty_attn_desc[24] = {
+ "xsem_vfc_rbc_parity_error",
+ "xsem_storm_rf_parity_error",
+ "xsem_reg_gen_parity_error",
+ "xsem_mem006_i_ecc_0_rf_int",
+ "xsem_mem006_i_ecc_1_rf_int",
+ "xsem_mem005_i_mem_prty",
+ "xsem_mem002_i_mem_prty",
+ "xsem_mem004_i_mem_prty",
+ "xsem_mem003_i_mem_prty",
+ "xsem_mem001_i_mem_prty",
+ "xsem_fast_memory_mem024_i_mem_prty",
+ "xsem_fast_memory_mem023_i_mem_prty",
+ "xsem_fast_memory_mem022_i_mem_prty",
+ "xsem_fast_memory_mem021_i_mem_prty",
+ "xsem_fast_memory_mem020_i_mem_prty",
+ "xsem_fast_memory_mem019_i_mem_prty",
+ "xsem_fast_memory_mem018_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "xsem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "xsem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "xsem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define xsem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_bb_a0 = {
+ 0, 3, xsem_prty0_bb_a0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_bb_a0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_bb_a0 = {
+ 1, 7, xsem_prty1_bb_a0_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static struct attn_hw_reg *xsem_prty_bb_a0_regs[2] = {
+ &xsem_prty0_bb_a0, &xsem_prty1_bb_a0,
+};
+
+static const u16 xsem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_bb_b0 = {
+ 0, 3, xsem_prty0_bb_b0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_bb_b0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_bb_b0 = {
+ 1, 7, xsem_prty1_bb_b0_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
+ &xsem_prty0_bb_b0, &xsem_prty1_bb_b0,
+};
+
+static const u16 xsem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_k2 = {
+ 0, 3, xsem_prty0_k2_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+ 0x14000cc
+};
+
+static const u16 xsem_prty1_k2_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_k2 = {
+ 1, 7, xsem_prty1_k2_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+ 0x1400204
+};
+
+static const u16 xsem_fast_memory_prty1_k2_attn_idx[7] = {
+ 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg xsem_fast_memory_prty1_k2 = {
+ 2, 7, xsem_fast_memory_prty1_k2_attn_idx, 0x1440200, 0x144020c,
+ 0x1440208, 0x1440204
+};
+
+static struct attn_hw_reg *xsem_prty_k2_regs[3] = {
+ &xsem_prty0_k2, &xsem_prty1_k2, &xsem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysem_int_attn_desc[46] = {
+ "ysem_address_error",
+ "ysem_fic_last_error",
+ "ysem_fic_length_error",
+ "ysem_fic_fifo_error",
+ "ysem_pas_buf_fifo_error",
+ "ysem_sync_fin_pop_error",
+ "ysem_sync_dra_wr_push_error",
+ "ysem_sync_dra_wr_pop_error",
+ "ysem_sync_dra_rd_push_error",
+ "ysem_sync_dra_rd_pop_error",
+ "ysem_sync_fin_push_error",
+ "ysem_sem_fast_address_error",
+ "ysem_cam_lsb_inp_fifo",
+ "ysem_cam_msb_inp_fifo",
+ "ysem_cam_out_fifo",
+ "ysem_fin_fifo",
+ "ysem_thread_fifo_error",
+ "ysem_thread_overrun",
+ "ysem_sync_ext_store_push_error",
+ "ysem_sync_ext_store_pop_error",
+ "ysem_sync_ext_load_push_error",
+ "ysem_sync_ext_load_pop_error",
+ "ysem_sync_ram_rd_push_error",
+ "ysem_sync_ram_rd_pop_error",
+ "ysem_sync_ram_wr_pop_error",
+ "ysem_sync_ram_wr_push_error",
+ "ysem_sync_dbg_push_error",
+ "ysem_sync_dbg_pop_error",
+ "ysem_dbg_fifo_error",
+ "ysem_cam_msb2_inp_fifo",
+ "ysem_vfc_interrupt",
+ "ysem_vfc_out_fifo_error",
+ "ysem_storm_stack_uf_attn",
+ "ysem_storm_stack_of_attn",
+ "ysem_storm_runtime_error",
+ "ysem_ext_load_pend_wr_error",
+ "ysem_thread_rls_orun_error",
+ "ysem_thread_rls_aloc_error",
+ "ysem_thread_rls_vld_error",
+ "ysem_ext_thread_oor_error",
+ "ysem_ord_id_fifo_error",
+ "ysem_invld_foc_error",
+ "ysem_ext_ld_len_error",
+ "ysem_thrd_ord_fifo_error",
+ "ysem_invld_thrd_ord_error",
+ "ysem_fast_memory_address_error",
+};
+#else
+#define ysem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_bb_a0 = {
+ 0, 32, ysem_int0_bb_a0_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_bb_a0 = {
+ 1, 13, ysem_int1_bb_a0_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_a0 = {
+ 2, 1, ysem_fast_memory_int0_bb_a0_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048, 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_bb_a0_regs[3] = {
+ &ysem_int0_bb_a0, &ysem_int1_bb_a0, &ysem_fast_memory_int0_bb_a0,
+};
+
+static const u16 ysem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_bb_b0 = {
+ 0, 32, ysem_int0_bb_b0_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_bb_b0 = {
+ 1, 13, ysem_int1_bb_b0_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
+ 2, 1, ysem_fast_memory_int0_bb_b0_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048, 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
+ &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0,
+};
+
+static const u16 ysem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_k2 = {
+ 0, 32, ysem_int0_k2_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+ 0x1500044
+};
+
+static const u16 ysem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_k2 = {
+ 1, 13, ysem_int1_k2_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+ 0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_k2 = {
+ 2, 1, ysem_fast_memory_int0_k2_attn_idx, 0x1540040, 0x154004c,
+ 0x1540048, 0x1540044
+};
+
+static struct attn_hw_reg *ysem_int_k2_regs[3] = {
+ &ysem_int0_k2, &ysem_int1_k2, &ysem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ysem_prty_attn_desc[24] = {
+ "ysem_vfc_rbc_parity_error",
+ "ysem_storm_rf_parity_error",
+ "ysem_reg_gen_parity_error",
+ "ysem_mem006_i_ecc_0_rf_int",
+ "ysem_mem006_i_ecc_1_rf_int",
+ "ysem_mem005_i_mem_prty",
+ "ysem_mem002_i_mem_prty",
+ "ysem_mem004_i_mem_prty",
+ "ysem_mem003_i_mem_prty",
+ "ysem_mem001_i_mem_prty",
+ "ysem_fast_memory_mem024_i_mem_prty",
+ "ysem_fast_memory_mem023_i_mem_prty",
+ "ysem_fast_memory_mem022_i_mem_prty",
+ "ysem_fast_memory_mem021_i_mem_prty",
+ "ysem_fast_memory_mem020_i_mem_prty",
+ "ysem_fast_memory_mem019_i_mem_prty",
+ "ysem_fast_memory_mem018_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "ysem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "ysem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "ysem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define ysem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_bb_a0 = {
+ 0, 3, ysem_prty0_bb_a0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_bb_a0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_bb_a0 = {
+ 1, 7, ysem_prty1_bb_a0_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static struct attn_hw_reg *ysem_prty_bb_a0_regs[2] = {
+ &ysem_prty0_bb_a0, &ysem_prty1_bb_a0,
+};
+
+static const u16 ysem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_bb_b0 = {
+ 0, 3, ysem_prty0_bb_b0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_bb_b0_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_bb_b0 = {
+ 1, 7, ysem_prty1_bb_b0_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
+ &ysem_prty0_bb_b0, &ysem_prty1_bb_b0,
+};
+
+static const u16 ysem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_k2 = {
+ 0, 3, ysem_prty0_k2_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+ 0x15000cc
+};
+
+static const u16 ysem_prty1_k2_attn_idx[7] = {
+ 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_k2 = {
+ 1, 7, ysem_prty1_k2_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+ 0x1500204
+};
+
+static const u16 ysem_fast_memory_prty1_k2_attn_idx[7] = {
+ 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ysem_fast_memory_prty1_k2 = {
+ 2, 7, ysem_fast_memory_prty1_k2_attn_idx, 0x1540200, 0x154020c,
+ 0x1540208, 0x1540204
+};
+
+static struct attn_hw_reg *ysem_prty_k2_regs[3] = {
+ &ysem_prty0_k2, &ysem_prty1_k2, &ysem_fast_memory_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psem_int_attn_desc[46] = {
+ "psem_address_error",
+ "psem_fic_last_error",
+ "psem_fic_length_error",
+ "psem_fic_fifo_error",
+ "psem_pas_buf_fifo_error",
+ "psem_sync_fin_pop_error",
+ "psem_sync_dra_wr_push_error",
+ "psem_sync_dra_wr_pop_error",
+ "psem_sync_dra_rd_push_error",
+ "psem_sync_dra_rd_pop_error",
+ "psem_sync_fin_push_error",
+ "psem_sem_fast_address_error",
+ "psem_cam_lsb_inp_fifo",
+ "psem_cam_msb_inp_fifo",
+ "psem_cam_out_fifo",
+ "psem_fin_fifo",
+ "psem_thread_fifo_error",
+ "psem_thread_overrun",
+ "psem_sync_ext_store_push_error",
+ "psem_sync_ext_store_pop_error",
+ "psem_sync_ext_load_push_error",
+ "psem_sync_ext_load_pop_error",
+ "psem_sync_ram_rd_push_error",
+ "psem_sync_ram_rd_pop_error",
+ "psem_sync_ram_wr_pop_error",
+ "psem_sync_ram_wr_push_error",
+ "psem_sync_dbg_push_error",
+ "psem_sync_dbg_pop_error",
+ "psem_dbg_fifo_error",
+ "psem_cam_msb2_inp_fifo",
+ "psem_vfc_interrupt",
+ "psem_vfc_out_fifo_error",
+ "psem_storm_stack_uf_attn",
+ "psem_storm_stack_of_attn",
+ "psem_storm_runtime_error",
+ "psem_ext_load_pend_wr_error",
+ "psem_thread_rls_orun_error",
+ "psem_thread_rls_aloc_error",
+ "psem_thread_rls_vld_error",
+ "psem_ext_thread_oor_error",
+ "psem_ord_id_fifo_error",
+ "psem_invld_foc_error",
+ "psem_ext_ld_len_error",
+ "psem_thrd_ord_fifo_error",
+ "psem_invld_thrd_ord_error",
+ "psem_fast_memory_address_error",
+};
+#else
+#define psem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 psem_int0_bb_a0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_bb_a0 = {
+ 0, 32, psem_int0_bb_a0_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_bb_a0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_bb_a0 = {
+ 1, 13, psem_int1_bb_a0_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_bb_a0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_a0 = {
+ 2, 1, psem_fast_memory_int0_bb_a0_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048, 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_bb_a0_regs[3] = {
+ &psem_int0_bb_a0, &psem_int1_bb_a0, &psem_fast_memory_int0_bb_a0,
+};
+
+static const u16 psem_int0_bb_b0_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_bb_b0 = {
+ 0, 32, psem_int0_bb_b0_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_bb_b0_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_bb_b0 = {
+ 1, 13, psem_int1_bb_b0_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_bb_b0_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
+ 2, 1, psem_fast_memory_int0_bb_b0_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048, 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
+ &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0,
+};
+
+static const u16 psem_int0_k2_attn_idx[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_k2 = {
+ 0, 32, psem_int0_k2_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+ 0x1600044
+};
+
+static const u16 psem_int1_k2_attn_idx[13] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_k2 = {
+ 1, 13, psem_int1_k2_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+ 0x1600054
+};
+
+static const u16 psem_fast_memory_int0_k2_attn_idx[1] = {
+ 45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_k2 = {
+ 2, 1, psem_fast_memory_int0_k2_attn_idx, 0x1640040, 0x164004c,
+ 0x1640048, 0x1640044
+};
+
+static struct attn_hw_reg *psem_int_k2_regs[3] = {
+ &psem_int0_k2, &psem_int1_k2, &psem_fast_memory_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *psem_prty_attn_desc[23] = {
+ "psem_vfc_rbc_parity_error",
+ "psem_storm_rf_parity_error",
+ "psem_reg_gen_parity_error",
+ "psem_mem005_i_ecc_0_rf_int",
+ "psem_mem005_i_ecc_1_rf_int",
+ "psem_mem004_i_mem_prty",
+ "psem_mem002_i_mem_prty",
+ "psem_mem003_i_mem_prty",
+ "psem_mem001_i_mem_prty",
+ "psem_fast_memory_mem024_i_mem_prty",
+ "psem_fast_memory_mem023_i_mem_prty",
+ "psem_fast_memory_mem022_i_mem_prty",
+ "psem_fast_memory_mem021_i_mem_prty",
+ "psem_fast_memory_mem020_i_mem_prty",
+ "psem_fast_memory_mem019_i_mem_prty",
+ "psem_fast_memory_mem018_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+ "psem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+ "psem_fast_memory_vfc_config_mem006_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem001_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem004_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem003_i_mem_prty",
+ "psem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define psem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 psem_prty0_bb_a0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_bb_a0 = {
+ 0, 3, psem_prty0_bb_a0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_bb_a0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_bb_a0 = {
+ 1, 6, psem_prty1_bb_a0_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_a0 = {
+ 2, 6, psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x164a200,
+ 0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_bb_a0_regs[3] = {
+ &psem_prty0_bb_a0, &psem_prty1_bb_a0,
+ &psem_fast_memory_vfc_config_prty1_bb_a0,
+};
+
+static const u16 psem_prty0_bb_b0_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_bb_b0 = {
+ 0, 3, psem_prty0_bb_b0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_bb_b0_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_bb_b0 = {
+ 1, 6, psem_prty1_bb_b0_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = {
+ 16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x164a200,
+ 0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
+ &psem_prty0_bb_b0, &psem_prty1_bb_b0,
+ &psem_fast_memory_vfc_config_prty1_bb_b0,
+};
+
+static const u16 psem_prty0_k2_attn_idx[3] = {
+ 0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_k2 = {
+ 0, 3, psem_prty0_k2_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+ 0x16000cc
+};
+
+static const u16 psem_prty1_k2_attn_idx[6] = {
+ 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_k2 = {
+ 1, 6, psem_prty1_k2_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+ 0x1600204
+};
+
+static const u16 psem_fast_memory_prty1_k2_attn_idx[7] = {
+ 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg psem_fast_memory_prty1_k2 = {
+ 2, 7, psem_fast_memory_prty1_k2_attn_idx, 0x1640200, 0x164020c,
+ 0x1640208, 0x1640204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = {
+ 16, 17, 18, 19, 20, 21,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_k2 = {
+ 3, 6, psem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x164a200,
+ 0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_k2_regs[4] = {
+ &psem_prty0_k2, &psem_prty1_k2, &psem_fast_memory_prty1_k2,
+ &psem_fast_memory_vfc_config_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rss_int_attn_desc[12] = {
+ "rss_address_error",
+ "rss_msg_inp_cnt_error",
+ "rss_msg_out_cnt_error",
+ "rss_inp_state_error",
+ "rss_out_state_error",
+ "rss_main_state_error",
+ "rss_calc_state_error",
+ "rss_inp_fifo_error",
+ "rss_cmd_fifo_error",
+ "rss_msg_fifo_error",
+ "rss_rsp_fifo_error",
+ "rss_hdr_fifo_error",
+};
+#else
+#define rss_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rss_int0_bb_a0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_bb_a0 = {
+ 0, 12, rss_int0_bb_a0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_bb_a0_regs[1] = {
+ &rss_int0_bb_a0,
+};
+
+static const u16 rss_int0_bb_b0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_bb_b0 = {
+ 0, 12, rss_int0_bb_b0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
+ &rss_int0_bb_b0,
+};
+
+static const u16 rss_int0_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_k2 = {
+ 0, 12, rss_int0_k2_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_k2_regs[1] = {
+ &rss_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rss_prty_attn_desc[4] = {
+ "rss_mem002_i_ecc_rf_int",
+ "rss_mem001_i_ecc_rf_int",
+ "rss_mem003_i_mem_prty",
+ "rss_mem004_i_mem_prty",
+};
+#else
+#define rss_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rss_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_bb_a0 = {
+ 0, 4, rss_prty1_bb_a0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_bb_a0_regs[1] = {
+ &rss_prty1_bb_a0,
+};
+
+static const u16 rss_prty1_bb_b0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_bb_b0 = {
+ 0, 4, rss_prty1_bb_b0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
+ &rss_prty1_bb_b0,
+};
+
+static const u16 rss_prty1_k2_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_k2 = {
+ 0, 4, rss_prty1_k2_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_k2_regs[1] = {
+ &rss_prty1_k2,
+};
+
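+/* A minimal sketch of how these per-chip tables can be consumed, assuming
+ * the ecore_rd() register accessor and the attn_hw_reg field names noted
+ * near the top of these tables; the helper name is illustrative only.
+ * Usage would look like:
+ *   ecore_attn_print_asserted(p_hwfn, p_ptt, rss_prty_k2_regs, 1,
+ *                             rss_prty_attn_desc);
+ */
+static void ecore_attn_print_asserted(struct ecore_hwfn *p_hwfn,
+				      struct ecore_ptt *p_ptt,
+				      struct attn_hw_reg **regs,
+				      u8 num_regs, const char **desc)
+{
+	u8 i, bit;
+
+	for (i = 0; i < num_regs; i++) {
+		/* Latch the sticky status register for this block */
+		u32 sts = ecore_rd(p_hwfn, p_ptt, regs[i]->sts_addr);
+
+		for (bit = 0; bit < regs[i]->num_of_bits; bit++) {
+			if (!(sts & (1U << bit)))
+				continue;
+			/* attn_idx indirects into the *_attn_desc table;
+			 * desc is OSAL_NULL when ATTN_DESC is not defined.
+			 */
+			DP_NOTICE(p_hwfn, false, "%s\n",
+				  desc != OSAL_NULL ?
+				  desc[regs[i]->bit_attn_idx[bit]] :
+				  "unknown attention");
+		}
+	}
+}
+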
+#ifdef ATTN_DESC
+static const char *tmld_int_attn_desc[6] = {
+ "tmld_address_error",
+ "tmld_ld_hdr_err",
+ "tmld_ld_seg_msg_err",
+ "tmld_ld_tid_mini_cache_err",
+ "tmld_ld_cid_mini_cache_err",
+ "tmld_ld_long_message",
+};
+#else
+#define tmld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tmld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_bb_a0 = {
+ 0, 6, tmld_int0_bb_a0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_bb_a0_regs[1] = {
+ &tmld_int0_bb_a0,
+};
+
+static const u16 tmld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_bb_b0 = {
+ 0, 6, tmld_int0_bb_b0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
+ &tmld_int0_bb_b0,
+};
+
+static const u16 tmld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_k2 = {
+ 0, 6, tmld_int0_k2_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_k2_regs[1] = {
+ &tmld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tmld_prty_attn_desc[8] = {
+ "tmld_mem006_i_ecc_rf_int",
+ "tmld_mem002_i_ecc_rf_int",
+ "tmld_mem003_i_mem_prty",
+ "tmld_mem004_i_mem_prty",
+ "tmld_mem007_i_mem_prty",
+ "tmld_mem008_i_mem_prty",
+ "tmld_mem005_i_mem_prty",
+ "tmld_mem001_i_mem_prty",
+};
+#else
+#define tmld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tmld_prty1_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_bb_a0 = {
+ 0, 8, tmld_prty1_bb_a0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_bb_a0_regs[1] = {
+ &tmld_prty1_bb_a0,
+};
+
+static const u16 tmld_prty1_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_bb_b0 = {
+ 0, 8, tmld_prty1_bb_b0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
+ &tmld_prty1_bb_b0,
+};
+
+static const u16 tmld_prty1_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_k2 = {
+ 0, 8, tmld_prty1_k2_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_k2_regs[1] = {
+ &tmld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *muld_int_attn_desc[6] = {
+ "muld_address_error",
+ "muld_ld_hdr_err",
+ "muld_ld_seg_msg_err",
+ "muld_ld_tid_mini_cache_err",
+ "muld_ld_cid_mini_cache_err",
+ "muld_ld_long_message",
+};
+#else
+#define muld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 muld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_bb_a0 = {
+ 0, 6, muld_int0_bb_a0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_bb_a0_regs[1] = {
+ &muld_int0_bb_a0,
+};
+
+static const u16 muld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_bb_b0 = {
+ 0, 6, muld_int0_bb_b0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
+ &muld_int0_bb_b0,
+};
+
+static const u16 muld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_k2 = {
+ 0, 6, muld_int0_k2_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_k2_regs[1] = {
+ &muld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *muld_prty_attn_desc[10] = {
+ "muld_mem005_i_ecc_rf_int",
+ "muld_mem001_i_ecc_rf_int",
+ "muld_mem008_i_ecc_rf_int",
+ "muld_mem007_i_ecc_rf_int",
+ "muld_mem002_i_mem_prty",
+ "muld_mem003_i_mem_prty",
+ "muld_mem009_i_mem_prty",
+ "muld_mem010_i_mem_prty",
+ "muld_mem004_i_mem_prty",
+ "muld_mem006_i_mem_prty",
+};
+#else
+#define muld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 muld_prty1_bb_a0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_bb_a0 = {
+ 0, 10, muld_prty1_bb_a0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208,
+ 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_bb_a0_regs[1] = {
+ &muld_prty1_bb_a0,
+};
+
+static const u16 muld_prty1_bb_b0_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_bb_b0 = {
+ 0, 10, muld_prty1_bb_b0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208,
+ 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
+ &muld_prty1_bb_b0,
+};
+
+static const u16 muld_prty1_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_k2 = {
+ 0, 10, muld_prty1_k2_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_k2_regs[1] = {
+ &muld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *yuld_int_attn_desc[6] = {
+ "yuld_address_error",
+ "yuld_ld_hdr_err",
+ "yuld_ld_seg_msg_err",
+ "yuld_ld_tid_mini_cache_err",
+ "yuld_ld_cid_mini_cache_err",
+ "yuld_ld_long_message",
+};
+#else
+#define yuld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 yuld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_bb_a0 = {
+ 0, 6, yuld_int0_bb_a0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_bb_a0_regs[1] = {
+ &yuld_int0_bb_a0,
+};
+
+static const u16 yuld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_bb_b0 = {
+ 0, 6, yuld_int0_bb_b0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
+ &yuld_int0_bb_b0,
+};
+
+static const u16 yuld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_k2 = {
+ 0, 6, yuld_int0_k2_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_k2_regs[1] = {
+ &yuld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *yuld_prty_attn_desc[6] = {
+ "yuld_mem001_i_mem_prty",
+ "yuld_mem002_i_mem_prty",
+ "yuld_mem005_i_mem_prty",
+ "yuld_mem006_i_mem_prty",
+ "yuld_mem004_i_mem_prty",
+ "yuld_mem003_i_mem_prty",
+};
+#else
+#define yuld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 yuld_prty1_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_bb_a0 = {
+ 0, 6, yuld_prty1_bb_a0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_bb_a0_regs[1] = {
+ &yuld_prty1_bb_a0,
+};
+
+static const u16 yuld_prty1_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_bb_b0 = {
+ 0, 6, yuld_prty1_bb_b0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
+ &yuld_prty1_bb_b0,
+};
+
+static const u16 yuld_prty1_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_k2 = {
+ 0, 6, yuld_prty1_k2_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_k2_regs[1] = {
+ &yuld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xyld_int_attn_desc[6] = {
+ "xyld_address_error",
+ "xyld_ld_hdr_err",
+ "xyld_ld_seg_msg_err",
+ "xyld_ld_tid_mini_cache_err",
+ "xyld_ld_cid_mini_cache_err",
+ "xyld_ld_long_message",
+};
+#else
+#define xyld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xyld_int0_bb_a0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_bb_a0 = {
+ 0, 6, xyld_int0_bb_a0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_bb_a0_regs[1] = {
+ &xyld_int0_bb_a0,
+};
+
+static const u16 xyld_int0_bb_b0_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_bb_b0 = {
+ 0, 6, xyld_int0_bb_b0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
+ &xyld_int0_bb_b0,
+};
+
+static const u16 xyld_int0_k2_attn_idx[6] = {
+ 0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_k2 = {
+ 0, 6, xyld_int0_k2_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_k2_regs[1] = {
+ &xyld_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *xyld_prty_attn_desc[9] = {
+ "xyld_mem004_i_ecc_rf_int",
+ "xyld_mem006_i_ecc_rf_int",
+ "xyld_mem001_i_mem_prty",
+ "xyld_mem002_i_mem_prty",
+ "xyld_mem008_i_mem_prty",
+ "xyld_mem009_i_mem_prty",
+ "xyld_mem003_i_mem_prty",
+ "xyld_mem005_i_mem_prty",
+ "xyld_mem007_i_mem_prty",
+};
+#else
+#define xyld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xyld_prty1_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_bb_a0 = {
+ 0, 9, xyld_prty1_bb_a0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_bb_a0_regs[1] = {
+ &xyld_prty1_bb_a0,
+};
+
+static const u16 xyld_prty1_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_bb_b0 = {
+ 0, 9, xyld_prty1_bb_b0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
+ &xyld_prty1_bb_b0,
+};
+
+static const u16 xyld_prty1_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_k2 = {
+ 0, 9, xyld_prty1_k2_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_k2_regs[1] = {
+ &xyld_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prm_int_attn_desc[11] = {
+ "prm_address_error",
+ "prm_ififo_error",
+ "prm_immed_fifo_error",
+ "prm_ofst_pend_error",
+ "prm_pad_pend_error",
+ "prm_pbinp_pend_error",
+ "prm_tag_pend_error",
+ "prm_mstorm_eop_err",
+ "prm_ustorm_eop_err",
+ "prm_mstorm_que_err",
+ "prm_ustorm_que_err",
+};
+#else
+#define prm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 prm_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_bb_a0 = {
+ 0, 11, prm_int0_bb_a0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_bb_a0_regs[1] = {
+ &prm_int0_bb_a0,
+};
+
+static const u16 prm_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_bb_b0 = {
+ 0, 11, prm_int0_bb_b0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
+ &prm_int0_bb_b0,
+};
+
+static const u16 prm_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_k2 = {
+ 0, 11, prm_int0_k2_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_k2_regs[1] = {
+ &prm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *prm_prty_attn_desc[30] = {
+ "prm_datapath_registers",
+ "prm_mem012_i_ecc_rf_int",
+ "prm_mem013_i_ecc_rf_int",
+ "prm_mem014_i_ecc_rf_int",
+ "prm_mem020_i_ecc_rf_int",
+ "prm_mem004_i_mem_prty",
+ "prm_mem024_i_mem_prty",
+ "prm_mem016_i_mem_prty",
+ "prm_mem017_i_mem_prty",
+ "prm_mem008_i_mem_prty",
+ "prm_mem009_i_mem_prty",
+ "prm_mem010_i_mem_prty",
+ "prm_mem015_i_mem_prty",
+ "prm_mem011_i_mem_prty",
+ "prm_mem003_i_mem_prty",
+ "prm_mem002_i_mem_prty",
+ "prm_mem005_i_mem_prty",
+ "prm_mem023_i_mem_prty",
+ "prm_mem006_i_mem_prty",
+ "prm_mem007_i_mem_prty",
+ "prm_mem001_i_mem_prty",
+ "prm_mem022_i_mem_prty",
+ "prm_mem021_i_mem_prty",
+ "prm_mem019_i_mem_prty",
+ "prm_mem015_i_ecc_rf_int",
+ "prm_mem021_i_ecc_rf_int",
+ "prm_mem025_i_mem_prty",
+ "prm_mem018_i_mem_prty",
+ "prm_mem012_i_mem_prty",
+ "prm_mem020_i_mem_prty",
+};
+#else
+#define prm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 prm_prty1_bb_a0_attn_idx[25] = {
+ 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24,
+ 25, 26, 27, 28, 29,
+};
+
+static struct attn_hw_reg prm_prty1_bb_a0 = {
+ 0, 25, prm_prty1_bb_a0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_bb_a0_regs[1] = {
+ &prm_prty1_bb_a0,
+};
+
+static const u16 prm_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prm_prty0_bb_b0 = {
+ 0, 1, prm_prty0_bb_b0_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054
+};
+
+static const u16 prm_prty1_bb_b0_attn_idx[24] = {
+ 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 24, 25,
+ 26, 27, 28, 29,
+};
+
+static struct attn_hw_reg prm_prty1_bb_b0 = {
+ 1, 24, prm_prty1_bb_b0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
+ &prm_prty0_bb_b0, &prm_prty1_bb_b0,
+};
+
+static const u16 prm_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg prm_prty0_k2 = {
+ 0, 1, prm_prty0_k2_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054
+};
+
+static const u16 prm_prty1_k2_attn_idx[23] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23,
+};
+
+static struct attn_hw_reg prm_prty1_k2 = {
+ 1, 23, prm_prty1_k2_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_k2_regs[2] = {
+ &prm_prty0_k2, &prm_prty1_k2,
+};
+
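+/* The _bb_a0/_bb_b0/_k2 suffixes are the same tables specialized per chip
+ * variant; where a block gained or lost attention bits between revisions
+ * (e.g. prm_prty1 above: 25 bits on BB A0, 24 on BB B0, 23 on K2), only
+ * the attn_idx contents and bit counts change while the register
+ * addresses stay the same.
+ */
+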
+#ifdef ATTN_DESC
+static const char *pbf_pb1_int_attn_desc[9] = {
+ "pbf_pb1_address_error",
+ "pbf_pb1_eop_error",
+ "pbf_pb1_ififo_error",
+ "pbf_pb1_pfifo_error",
+ "pbf_pb1_db_buf_error",
+ "pbf_pb1_th_exec_error",
+ "pbf_pb1_tq_error_wr",
+ "pbf_pb1_tq_error_rd_th",
+ "pbf_pb1_tq_error_rd_ih",
+};
+#else
+#define pbf_pb1_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb1_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_a0 = {
+ 0, 9, pbf_pb1_int0_bb_a0_attn_idx, 0xda0040, 0xda004c, 0xda0048,
+ 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_a0_regs[1] = {
+ &pbf_pb1_int0_bb_a0,
+};
+
+static const u16 pbf_pb1_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
+ 0, 9, pbf_pb1_int0_bb_b0_attn_idx, 0xda0040, 0xda004c, 0xda0048,
+ 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
+ &pbf_pb1_int0_bb_b0,
+};
+
+static const u16 pbf_pb1_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_k2 = {
+ 0, 9, pbf_pb1_int0_k2_attn_idx, 0xda0040, 0xda004c, 0xda0048, 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_k2_regs[1] = {
+ &pbf_pb1_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb1_prty_attn_desc[1] = {
+ "pbf_pb1_datapath_registers",
+};
+#else
+#define pbf_pb1_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb1_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
+ 0, 1, pbf_pb1_prty0_bb_b0_attn_idx, 0xda0050, 0xda005c, 0xda0058,
+ 0xda0054
+};
+
+static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
+ &pbf_pb1_prty0_bb_b0,
+};
+
+static const u16 pbf_pb1_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb1_prty0_k2 = {
+ 0, 1, pbf_pb1_prty0_k2_attn_idx, 0xda0050, 0xda005c, 0xda0058, 0xda0054
+};
+
+static struct attn_hw_reg *pbf_pb1_prty_k2_regs[1] = {
+ &pbf_pb1_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb2_int_attn_desc[9] = {
+ "pbf_pb2_address_error",
+ "pbf_pb2_eop_error",
+ "pbf_pb2_ififo_error",
+ "pbf_pb2_pfifo_error",
+ "pbf_pb2_db_buf_error",
+ "pbf_pb2_th_exec_error",
+ "pbf_pb2_tq_error_wr",
+ "pbf_pb2_tq_error_rd_th",
+ "pbf_pb2_tq_error_rd_ih",
+};
+#else
+#define pbf_pb2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb2_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_a0 = {
+ 0, 9, pbf_pb2_int0_bb_a0_attn_idx, 0xda4040, 0xda404c, 0xda4048,
+ 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_a0_regs[1] = {
+ &pbf_pb2_int0_bb_a0,
+};
+
+static const u16 pbf_pb2_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
+ 0, 9, pbf_pb2_int0_bb_b0_attn_idx, 0xda4040, 0xda404c, 0xda4048,
+ 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
+ &pbf_pb2_int0_bb_b0,
+};
+
+static const u16 pbf_pb2_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_k2 = {
+ 0, 9, pbf_pb2_int0_k2_attn_idx, 0xda4040, 0xda404c, 0xda4048, 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_k2_regs[1] = {
+ &pbf_pb2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_pb2_prty_attn_desc[1] = {
+ "pbf_pb2_datapath_registers",
+};
+#else
+#define pbf_pb2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb2_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
+ 0, 1, pbf_pb2_prty0_bb_b0_attn_idx, 0xda4050, 0xda405c, 0xda4058,
+ 0xda4054
+};
+
+static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
+ &pbf_pb2_prty0_bb_b0,
+};
+
+static const u16 pbf_pb2_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_pb2_prty0_k2 = {
+ 0, 1, pbf_pb2_prty0_k2_attn_idx, 0xda4050, 0xda405c, 0xda4058, 0xda4054
+};
+
+static struct attn_hw_reg *pbf_pb2_prty_k2_regs[1] = {
+ &pbf_pb2_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rpb_int_attn_desc[9] = {
+ "rpb_address_error",
+ "rpb_eop_error",
+ "rpb_ififo_error",
+ "rpb_pfifo_error",
+ "rpb_db_buf_error",
+ "rpb_th_exec_error",
+ "rpb_tq_error_wr",
+ "rpb_tq_error_rd_th",
+ "rpb_tq_error_rd_ih",
+};
+#else
+#define rpb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rpb_int0_bb_a0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_bb_a0 = {
+ 0, 9, rpb_int0_bb_a0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_bb_a0_regs[1] = {
+ &rpb_int0_bb_a0,
+};
+
+static const u16 rpb_int0_bb_b0_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_bb_b0 = {
+ 0, 9, rpb_int0_bb_b0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
+ &rpb_int0_bb_b0,
+};
+
+static const u16 rpb_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_k2 = {
+ 0, 9, rpb_int0_k2_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_k2_regs[1] = {
+ &rpb_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rpb_prty_attn_desc[1] = {
+ "rpb_datapath_registers",
+};
+#else
+#define rpb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rpb_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg rpb_prty0_bb_b0 = {
+ 0, 1, rpb_prty0_bb_b0_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054
+};
+
+static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
+ &rpb_prty0_bb_b0,
+};
+
+static const u16 rpb_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg rpb_prty0_k2 = {
+ 0, 1, rpb_prty0_k2_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054
+};
+
+static struct attn_hw_reg *rpb_prty_k2_regs[1] = {
+ &rpb_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *btb_int_attn_desc[139] = {
+ "btb_address_error",
+ "btb_rc_pkt0_rls_error",
+ "btb_unused_0",
+ "btb_rc_pkt0_len_error",
+ "btb_unused_1",
+ "btb_rc_pkt0_protocol_error",
+ "btb_rc_pkt1_rls_error",
+ "btb_unused_2",
+ "btb_rc_pkt1_len_error",
+ "btb_unused_3",
+ "btb_rc_pkt1_protocol_error",
+ "btb_rc_pkt2_rls_error",
+ "btb_unused_4",
+ "btb_rc_pkt2_len_error",
+ "btb_unused_5",
+ "btb_rc_pkt2_protocol_error",
+ "btb_rc_pkt3_rls_error",
+ "btb_unused_6",
+ "btb_rc_pkt3_len_error",
+ "btb_unused_7",
+ "btb_rc_pkt3_protocol_error",
+ "btb_rc_sop_req_tc_port_error",
+ "btb_unused_8",
+ "btb_wc0_protocol_error",
+ "btb_unused_9",
+ "btb_ll_blk_error",
+ "btb_ll_arb_calc_error",
+ "btb_fc_alm_calc_error",
+ "btb_wc0_inp_fifo_error",
+ "btb_wc0_sop_fifo_error",
+ "btb_wc0_len_fifo_error",
+ "btb_wc0_eop_fifo_error",
+ "btb_wc0_queue_fifo_error",
+ "btb_wc0_free_point_fifo_error",
+ "btb_wc0_next_point_fifo_error",
+ "btb_wc0_strt_fifo_error",
+ "btb_wc0_second_dscr_fifo_error",
+ "btb_wc0_pkt_avail_fifo_error",
+ "btb_wc0_notify_fifo_error",
+ "btb_wc0_ll_req_fifo_error",
+ "btb_wc0_ll_pa_cnt_error",
+ "btb_wc0_bb_pa_cnt_error",
+ "btb_wc_dup_upd_data_fifo_error",
+ "btb_wc_dup_rsp_dscr_fifo_error",
+ "btb_wc_dup_upd_point_fifo_error",
+ "btb_wc_dup_pkt_avail_fifo_error",
+ "btb_wc_dup_pkt_avail_cnt_error",
+ "btb_rc_pkt0_side_fifo_error",
+ "btb_rc_pkt0_req_fifo_error",
+ "btb_rc_pkt0_blk_fifo_error",
+ "btb_rc_pkt0_rls_left_fifo_error",
+ "btb_rc_pkt0_strt_ptr_fifo_error",
+ "btb_rc_pkt0_second_ptr_fifo_error",
+ "btb_rc_pkt0_rsp_fifo_error",
+ "btb_rc_pkt0_dscr_fifo_error",
+ "btb_rc_pkt1_side_fifo_error",
+ "btb_rc_pkt1_req_fifo_error",
+ "btb_rc_pkt1_blk_fifo_error",
+ "btb_rc_pkt1_rls_left_fifo_error",
+ "btb_rc_pkt1_strt_ptr_fifo_error",
+ "btb_rc_pkt1_second_ptr_fifo_error",
+ "btb_rc_pkt1_rsp_fifo_error",
+ "btb_rc_pkt1_dscr_fifo_error",
+ "btb_rc_pkt2_side_fifo_error",
+ "btb_rc_pkt2_req_fifo_error",
+ "btb_rc_pkt2_blk_fifo_error",
+ "btb_rc_pkt2_rls_left_fifo_error",
+ "btb_rc_pkt2_strt_ptr_fifo_error",
+ "btb_rc_pkt2_second_ptr_fifo_error",
+ "btb_rc_pkt2_rsp_fifo_error",
+ "btb_rc_pkt2_dscr_fifo_error",
+ "btb_rc_pkt3_side_fifo_error",
+ "btb_rc_pkt3_req_fifo_error",
+ "btb_rc_pkt3_blk_fifo_error",
+ "btb_rc_pkt3_rls_left_fifo_error",
+ "btb_rc_pkt3_strt_ptr_fifo_error",
+ "btb_rc_pkt3_second_ptr_fifo_error",
+ "btb_rc_pkt3_rsp_fifo_error",
+ "btb_rc_pkt3_dscr_fifo_error",
+ "btb_rc_sop_queue_fifo_error",
+ "btb_ll_arb_rls_fifo_error",
+ "btb_ll_arb_prefetch_fifo_error",
+ "btb_rc_pkt0_rls_fifo_error",
+ "btb_rc_pkt1_rls_fifo_error",
+ "btb_rc_pkt2_rls_fifo_error",
+ "btb_rc_pkt3_rls_fifo_error",
+ "btb_rc_pkt4_rls_fifo_error",
+ "btb_rc_pkt5_rls_fifo_error",
+ "btb_rc_pkt6_rls_fifo_error",
+ "btb_rc_pkt7_rls_fifo_error",
+ "btb_rc_pkt4_rls_error",
+ "btb_rc_pkt4_len_error",
+ "btb_rc_pkt4_protocol_error",
+ "btb_rc_pkt4_side_fifo_error",
+ "btb_rc_pkt4_req_fifo_error",
+ "btb_rc_pkt4_blk_fifo_error",
+ "btb_rc_pkt4_rls_left_fifo_error",
+ "btb_rc_pkt4_strt_ptr_fifo_error",
+ "btb_rc_pkt4_second_ptr_fifo_error",
+ "btb_rc_pkt4_rsp_fifo_error",
+ "btb_rc_pkt4_dscr_fifo_error",
+ "btb_rc_pkt5_rls_error",
+ "btb_rc_pkt5_len_error",
+ "btb_rc_pkt5_protocol_error",
+ "btb_rc_pkt5_side_fifo_error",
+ "btb_rc_pkt5_req_fifo_error",
+ "btb_rc_pkt5_blk_fifo_error",
+ "btb_rc_pkt5_rls_left_fifo_error",
+ "btb_rc_pkt5_strt_ptr_fifo_error",
+ "btb_rc_pkt5_second_ptr_fifo_error",
+ "btb_rc_pkt5_rsp_fifo_error",
+ "btb_rc_pkt5_dscr_fifo_error",
+ "btb_rc_pkt6_rls_error",
+ "btb_rc_pkt6_len_error",
+ "btb_rc_pkt6_protocol_error",
+ "btb_rc_pkt6_side_fifo_error",
+ "btb_rc_pkt6_req_fifo_error",
+ "btb_rc_pkt6_blk_fifo_error",
+ "btb_rc_pkt6_rls_left_fifo_error",
+ "btb_rc_pkt6_strt_ptr_fifo_error",
+ "btb_rc_pkt6_second_ptr_fifo_error",
+ "btb_rc_pkt6_rsp_fifo_error",
+ "btb_rc_pkt6_dscr_fifo_error",
+ "btb_rc_pkt7_rls_error",
+ "btb_rc_pkt7_len_error",
+ "btb_rc_pkt7_protocol_error",
+ "btb_rc_pkt7_side_fifo_error",
+ "btb_rc_pkt7_req_fifo_error",
+ "btb_rc_pkt7_blk_fifo_error",
+ "btb_rc_pkt7_rls_left_fifo_error",
+ "btb_rc_pkt7_strt_ptr_fifo_error",
+ "btb_rc_pkt7_second_ptr_fifo_error",
+ "btb_rc_pkt7_rsp_fifo_error",
+ "btb_packet_available_sync_fifo_push_error",
+ "btb_wc6_notify_fifo_error",
+ "btb_wc9_queue_fifo_error",
+ "btb_wc0_sync_fifo_push_error",
+ "btb_rls_sync_fifo_push_error",
+ "btb_rc_pkt7_dscr_fifo_error",
+};
+#else
+#define btb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 btb_int0_bb_a0_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_bb_a0 = {
+ 0, 16, btb_int0_bb_a0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_bb_a0_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_bb_a0 = {
+ 1, 16, btb_int1_bb_a0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_bb_a0_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_bb_a0 = {
+ 2, 4, btb_int2_bb_a0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_bb_a0_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_bb_a0 = {
+ 3, 32, btb_int3_bb_a0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_bb_a0_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_bb_a0 = {
+ 4, 23, btb_int4_bb_a0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_bb_a0_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132,
+};
+
+static struct attn_hw_reg btb_int5_bb_a0 = {
+ 5, 32, btb_int5_bb_a0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_bb_a0_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_bb_a0 = {
+ 6, 1, btb_int6_bb_a0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_bb_a0_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_bb_a0 = {
+ 7, 1, btb_int8_bb_a0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_bb_a0_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_bb_a0 = {
+ 8, 1, btb_int9_bb_a0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_bb_a0_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_bb_a0 = {
+ 9, 1, btb_int10_bb_a0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_bb_a0_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_bb_a0 = {
+ 10, 2, btb_int11_bb_a0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_bb_a0_regs[11] = {
+ &btb_int0_bb_a0, &btb_int1_bb_a0, &btb_int2_bb_a0, &btb_int3_bb_a0,
+ &btb_int4_bb_a0, &btb_int5_bb_a0, &btb_int6_bb_a0, &btb_int8_bb_a0,
+ &btb_int9_bb_a0, &btb_int10_bb_a0, &btb_int11_bb_a0,
+};
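+
+/*
+ * Note: there is no btb_int7 register. The generated names jump from
+ * btb_int6 straight to btb_int8, while the register indexes in the
+ * structures above remain contiguous (6 is followed by 7).
+ */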
+
+static const u16 btb_int0_bb_b0_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_bb_b0 = {
+ 0, 16, btb_int0_bb_b0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_bb_b0_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_bb_b0 = {
+ 1, 16, btb_int1_bb_b0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_bb_b0_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_bb_b0 = {
+ 2, 4, btb_int2_bb_b0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_bb_b0_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_bb_b0 = {
+ 3, 32, btb_int3_bb_b0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_bb_b0_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_bb_b0 = {
+ 4, 23, btb_int4_bb_b0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_bb_b0_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132,
+};
+
+static struct attn_hw_reg btb_int5_bb_b0 = {
+ 5, 32, btb_int5_bb_b0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_bb_b0_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_bb_b0 = {
+ 6, 1, btb_int6_bb_b0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_bb_b0_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_bb_b0 = {
+ 7, 1, btb_int8_bb_b0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_bb_b0_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_bb_b0 = {
+ 8, 1, btb_int9_bb_b0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_bb_b0_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_bb_b0 = {
+ 9, 1, btb_int10_bb_b0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_bb_b0_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_bb_b0 = {
+ 10, 2, btb_int11_bb_b0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
+ &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
+ &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
+ &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0,
+};
+
+static const u16 btb_int0_k2_attn_idx[16] = {
+ 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_k2 = {
+ 0, 16, btb_int0_k2_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_k2_attn_idx[16] = {
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_k2 = {
+ 1, 16, btb_int1_k2_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_k2_attn_idx[4] = {
+ 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_k2 = {
+ 2, 4, btb_int2_k2_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_k2_attn_idx[32] = {
+ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_k2 = {
+ 3, 32, btb_int3_k2_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_k2_attn_idx[23] = {
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_k2 = {
+ 4, 23, btb_int4_k2_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_k2_attn_idx[32] = {
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132,
+};
+
+static struct attn_hw_reg btb_int5_k2 = {
+ 5, 32, btb_int5_k2_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_k2_attn_idx[1] = {
+ 133,
+};
+
+static struct attn_hw_reg btb_int6_k2 = {
+ 6, 1, btb_int6_k2_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_k2_attn_idx[1] = {
+ 134,
+};
+
+static struct attn_hw_reg btb_int8_k2 = {
+ 7, 1, btb_int8_k2_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_k2_attn_idx[1] = {
+ 135,
+};
+
+static struct attn_hw_reg btb_int9_k2 = {
+ 8, 1, btb_int9_k2_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_k2_attn_idx[1] = {
+ 136,
+};
+
+static struct attn_hw_reg btb_int10_k2 = {
+ 9, 1, btb_int10_k2_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_k2_attn_idx[2] = {
+ 137, 138,
+};
+
+static struct attn_hw_reg btb_int11_k2 = {
+ 10, 2, btb_int11_k2_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_k2_regs[11] = {
+ &btb_int0_k2, &btb_int1_k2, &btb_int2_k2, &btb_int3_k2, &btb_int4_k2,
+ &btb_int5_k2, &btb_int6_k2, &btb_int8_k2, &btb_int9_k2, &btb_int10_k2,
+ &btb_int11_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *btb_prty_attn_desc[36] = {
+ "btb_ll_bank0_mem_prty",
+ "btb_ll_bank1_mem_prty",
+ "btb_ll_bank2_mem_prty",
+ "btb_ll_bank3_mem_prty",
+ "btb_datapath_registers",
+ "btb_mem001_i_ecc_rf_int",
+ "btb_mem008_i_ecc_rf_int",
+ "btb_mem009_i_ecc_rf_int",
+ "btb_mem010_i_ecc_rf_int",
+ "btb_mem011_i_ecc_rf_int",
+ "btb_mem012_i_ecc_rf_int",
+ "btb_mem013_i_ecc_rf_int",
+ "btb_mem014_i_ecc_rf_int",
+ "btb_mem015_i_ecc_rf_int",
+ "btb_mem016_i_ecc_rf_int",
+ "btb_mem002_i_ecc_rf_int",
+ "btb_mem003_i_ecc_rf_int",
+ "btb_mem004_i_ecc_rf_int",
+ "btb_mem005_i_ecc_rf_int",
+ "btb_mem006_i_ecc_rf_int",
+ "btb_mem007_i_ecc_rf_int",
+ "btb_mem033_i_mem_prty",
+ "btb_mem035_i_mem_prty",
+ "btb_mem034_i_mem_prty",
+ "btb_mem032_i_mem_prty",
+ "btb_mem031_i_mem_prty",
+ "btb_mem021_i_mem_prty",
+ "btb_mem022_i_mem_prty",
+ "btb_mem023_i_mem_prty",
+ "btb_mem024_i_mem_prty",
+ "btb_mem025_i_mem_prty",
+ "btb_mem026_i_mem_prty",
+ "btb_mem027_i_mem_prty",
+ "btb_mem028_i_mem_prty",
+ "btb_mem030_i_mem_prty",
+ "btb_mem029_i_mem_prty",
+};
+#else
+#define btb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 btb_prty1_bb_a0_attn_idx[27] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_bb_a0 = {
+ 0, 27, btb_prty1_bb_a0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_bb_a0_regs[1] = {
+ &btb_prty1_bb_a0,
+};
+
+static const u16 btb_prty0_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg btb_prty0_bb_b0 = {
+ 0, 5, btb_prty0_bb_b0_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0
+};
+
+static const u16 btb_prty1_bb_b0_attn_idx[23] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 30, 31,
+ 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_bb_b0 = {
+ 1, 23, btb_prty1_bb_b0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
+ &btb_prty0_bb_b0, &btb_prty1_bb_b0,
+};
+
+static const u16 btb_prty0_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg btb_prty0_k2 = {
+ 0, 5, btb_prty0_k2_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0
+};
+
+static const u16 btb_prty1_k2_attn_idx[31] = {
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_k2 = {
+ 1, 31, btb_prty1_k2_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_k2_regs[2] = {
+ &btb_prty0_k2, &btb_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_int_attn_desc[1] = {
+ "pbf_address_error",
+};
+#else
+#define pbf_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_bb_a0 = {
+ 0, 1, pbf_int0_bb_a0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_bb_a0_regs[1] = {
+ &pbf_int0_bb_a0,
+};
+
+static const u16 pbf_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_bb_b0 = {
+ 0, 1, pbf_int0_bb_b0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
+ &pbf_int0_bb_b0,
+};
+
+static const u16 pbf_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_int0_k2 = {
+ 0, 1, pbf_int0_k2_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_k2_regs[1] = {
+ &pbf_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pbf_prty_attn_desc[59] = {
+ "pbf_datapath_registers",
+ "pbf_mem041_i_ecc_rf_int",
+ "pbf_mem042_i_ecc_rf_int",
+ "pbf_mem033_i_ecc_rf_int",
+ "pbf_mem003_i_ecc_rf_int",
+ "pbf_mem018_i_ecc_rf_int",
+ "pbf_mem009_i_ecc_0_rf_int",
+ "pbf_mem009_i_ecc_1_rf_int",
+ "pbf_mem012_i_ecc_0_rf_int",
+ "pbf_mem012_i_ecc_1_rf_int",
+ "pbf_mem012_i_ecc_2_rf_int",
+ "pbf_mem012_i_ecc_3_rf_int",
+ "pbf_mem012_i_ecc_4_rf_int",
+ "pbf_mem012_i_ecc_5_rf_int",
+ "pbf_mem012_i_ecc_6_rf_int",
+ "pbf_mem012_i_ecc_7_rf_int",
+ "pbf_mem012_i_ecc_8_rf_int",
+ "pbf_mem012_i_ecc_9_rf_int",
+ "pbf_mem012_i_ecc_10_rf_int",
+ "pbf_mem012_i_ecc_11_rf_int",
+ "pbf_mem012_i_ecc_12_rf_int",
+ "pbf_mem012_i_ecc_13_rf_int",
+ "pbf_mem012_i_ecc_14_rf_int",
+ "pbf_mem012_i_ecc_15_rf_int",
+ "pbf_mem040_i_mem_prty",
+ "pbf_mem039_i_mem_prty",
+ "pbf_mem038_i_mem_prty",
+ "pbf_mem034_i_mem_prty",
+ "pbf_mem032_i_mem_prty",
+ "pbf_mem031_i_mem_prty",
+ "pbf_mem030_i_mem_prty",
+ "pbf_mem029_i_mem_prty",
+ "pbf_mem022_i_mem_prty",
+ "pbf_mem023_i_mem_prty",
+ "pbf_mem021_i_mem_prty",
+ "pbf_mem020_i_mem_prty",
+ "pbf_mem001_i_mem_prty",
+ "pbf_mem002_i_mem_prty",
+ "pbf_mem006_i_mem_prty",
+ "pbf_mem007_i_mem_prty",
+ "pbf_mem005_i_mem_prty",
+ "pbf_mem004_i_mem_prty",
+ "pbf_mem028_i_mem_prty",
+ "pbf_mem026_i_mem_prty",
+ "pbf_mem027_i_mem_prty",
+ "pbf_mem019_i_mem_prty",
+ "pbf_mem016_i_mem_prty",
+ "pbf_mem017_i_mem_prty",
+ "pbf_mem008_i_mem_prty",
+ "pbf_mem011_i_mem_prty",
+ "pbf_mem010_i_mem_prty",
+ "pbf_mem024_i_mem_prty",
+ "pbf_mem025_i_mem_prty",
+ "pbf_mem037_i_mem_prty",
+ "pbf_mem036_i_mem_prty",
+ "pbf_mem035_i_mem_prty",
+ "pbf_mem014_i_mem_prty",
+ "pbf_mem015_i_mem_prty",
+ "pbf_mem013_i_mem_prty",
+};
+#else
+#define pbf_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_bb_a0 = {
+ 0, 31, pbf_prty1_bb_a0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_bb_a0_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_bb_a0 = {
+ 1, 27, pbf_prty2_bb_a0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_bb_a0_regs[2] = {
+ &pbf_prty1_bb_a0, &pbf_prty2_bb_a0,
+};
+
+static const u16 pbf_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_prty0_bb_b0 = {
+ 0, 1, pbf_prty0_bb_b0_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194
+};
+
+static const u16 pbf_prty1_bb_b0_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_bb_b0 = {
+ 1, 31, pbf_prty1_bb_b0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_bb_b0_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_bb_b0 = {
+ 2, 27, pbf_prty2_bb_b0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
+ &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0,
+};
+
+static const u16 pbf_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg pbf_prty0_k2 = {
+ 0, 1, pbf_prty0_k2_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194
+};
+
+static const u16 pbf_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_k2 = {
+ 1, 31, pbf_prty1_k2_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_k2_attn_idx[27] = {
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_k2 = {
+ 2, 27, pbf_prty2_k2_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_k2_regs[3] = {
+ &pbf_prty0_k2, &pbf_prty1_k2, &pbf_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *rdif_int_attn_desc[9] = {
+ "rdif_address_error",
+ "rdif_fatal_dix_err",
+ "rdif_fatal_config_err",
+ "rdif_cmd_fifo_err",
+ "rdif_order_fifo_err",
+ "rdif_rdata_fifo_err",
+ "rdif_dif_stop_err",
+ "rdif_partial_dif_w_eob",
+ "rdif_l1_dirty_bit",
+};
+#else
+#define rdif_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rdif_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg rdif_int0_bb_a0 = {
+ 0, 8, rdif_int0_bb_a0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_bb_a0_regs[1] = {
+ &rdif_int0_bb_a0,
+};
+
+static const u16 rdif_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg rdif_int0_bb_b0 = {
+ 0, 8, rdif_int0_bb_b0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
+ &rdif_int0_bb_b0,
+};
+
+static const u16 rdif_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rdif_int0_k2 = {
+ 0, 9, rdif_int0_k2_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_k2_regs[1] = {
+ &rdif_int0_k2,
+};
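+
+/*
+ * On BB silicon the RDIF interrupt register exposes eight attention
+ * bits; K2 adds a ninth, rdif_l1_dirty_bit. The TDIF tables below
+ * show the same split.
+ */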
+
+#ifdef ATTN_DESC
+static const char *rdif_prty_attn_desc[2] = {
+ "rdif_unused_0",
+ "rdif_datapath_registers",
+};
+#else
+#define rdif_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rdif_prty0_bb_b0_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg rdif_prty0_bb_b0 = {
+ 0, 1, rdif_prty0_bb_b0_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194
+};
+
+static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
+ &rdif_prty0_bb_b0,
+};
+
+static const u16 rdif_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg rdif_prty0_k2 = {
+ 0, 1, rdif_prty0_k2_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194
+};
+
+static struct attn_hw_reg *rdif_prty_k2_regs[1] = {
+ &rdif_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tdif_int_attn_desc[9] = {
+ "tdif_address_error",
+ "tdif_fatal_dix_err",
+ "tdif_fatal_config_err",
+ "tdif_cmd_fifo_err",
+ "tdif_order_fifo_err",
+ "tdif_rdata_fifo_err",
+ "tdif_dif_stop_err",
+ "tdif_partial_dif_w_eob",
+ "tdif_l1_dirty_bit",
+};
+#else
+#define tdif_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tdif_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tdif_int0_bb_a0 = {
+ 0, 8, tdif_int0_bb_a0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_bb_a0_regs[1] = {
+ &tdif_int0_bb_a0,
+};
+
+static const u16 tdif_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tdif_int0_bb_b0 = {
+ 0, 8, tdif_int0_bb_b0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
+ &tdif_int0_bb_b0,
+};
+
+static const u16 tdif_int0_k2_attn_idx[9] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tdif_int0_k2 = {
+ 0, 9, tdif_int0_k2_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_k2_regs[1] = {
+ &tdif_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tdif_prty_attn_desc[13] = {
+ "tdif_unused_0",
+ "tdif_datapath_registers",
+ "tdif_mem005_i_ecc_rf_int",
+ "tdif_mem009_i_ecc_rf_int",
+ "tdif_mem010_i_ecc_rf_int",
+ "tdif_mem011_i_ecc_rf_int",
+ "tdif_mem001_i_mem_prty",
+ "tdif_mem003_i_mem_prty",
+ "tdif_mem002_i_mem_prty",
+ "tdif_mem006_i_mem_prty",
+ "tdif_mem007_i_mem_prty",
+ "tdif_mem008_i_mem_prty",
+ "tdif_mem004_i_mem_prty",
+};
+#else
+#define tdif_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tdif_prty1_bb_a0_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_bb_a0 = {
+ 0, 11, tdif_prty1_bb_a0_attn_idx, 0x310200, 0x31020c, 0x310208,
+ 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_bb_a0_regs[1] = {
+ &tdif_prty1_bb_a0,
+};
+
+static const u16 tdif_prty0_bb_b0_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg tdif_prty0_bb_b0 = {
+ 0, 1, tdif_prty0_bb_b0_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194
+};
+
+static const u16 tdif_prty1_bb_b0_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_bb_b0 = {
+ 1, 11, tdif_prty1_bb_b0_attn_idx, 0x310200, 0x31020c, 0x310208,
+ 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
+ &tdif_prty0_bb_b0, &tdif_prty1_bb_b0,
+};
+
+static const u16 tdif_prty0_k2_attn_idx[1] = {
+ 1,
+};
+
+static struct attn_hw_reg tdif_prty0_k2 = {
+ 0, 1, tdif_prty0_k2_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194
+};
+
+static const u16 tdif_prty1_k2_attn_idx[11] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_k2 = {
+ 1, 11, tdif_prty1_k2_attn_idx, 0x310200, 0x31020c, 0x310208, 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_k2_regs[2] = {
+ &tdif_prty0_k2, &tdif_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cdu_int_attn_desc[8] = {
+ "cdu_address_error",
+ "cdu_ccfc_ld_l1_num_error",
+ "cdu_tcfc_ld_l1_num_error",
+ "cdu_ccfc_wb_l1_num_error",
+ "cdu_tcfc_wb_l1_num_error",
+ "cdu_ccfc_cvld_error",
+ "cdu_tcfc_cvld_error",
+ "cdu_bvalid_error",
+};
+#else
+#define cdu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cdu_int0_bb_a0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_bb_a0 = {
+ 0, 8, cdu_int0_bb_a0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_bb_a0_regs[1] = {
+ &cdu_int0_bb_a0,
+};
+
+static const u16 cdu_int0_bb_b0_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_bb_b0 = {
+ 0, 8, cdu_int0_bb_b0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
+ &cdu_int0_bb_b0,
+};
+
+static const u16 cdu_int0_k2_attn_idx[8] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_k2 = {
+ 0, 8, cdu_int0_k2_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_k2_regs[1] = {
+ &cdu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cdu_prty_attn_desc[5] = {
+ "cdu_mem001_i_mem_prty",
+ "cdu_mem004_i_mem_prty",
+ "cdu_mem002_i_mem_prty",
+ "cdu_mem005_i_mem_prty",
+ "cdu_mem003_i_mem_prty",
+};
+#else
+#define cdu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cdu_prty1_bb_a0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_bb_a0 = {
+ 0, 5, cdu_prty1_bb_a0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_bb_a0_regs[1] = {
+ &cdu_prty1_bb_a0,
+};
+
+static const u16 cdu_prty1_bb_b0_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_bb_b0 = {
+ 0, 5, cdu_prty1_bb_b0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
+ &cdu_prty1_bb_b0,
+};
+
+static const u16 cdu_prty1_k2_attn_idx[5] = {
+ 0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_k2 = {
+ 0, 5, cdu_prty1_k2_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_k2_regs[1] = {
+ &cdu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ccfc_int_attn_desc[2] = {
+ "ccfc_address_error",
+ "ccfc_exe_error",
+};
+#else
+#define ccfc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ccfc_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_bb_a0 = {
+ 0, 2, ccfc_int0_bb_a0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_bb_a0_regs[1] = {
+ &ccfc_int0_bb_a0,
+};
+
+static const u16 ccfc_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_bb_b0 = {
+ 0, 2, ccfc_int0_bb_b0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
+ &ccfc_int0_bb_b0,
+};
+
+static const u16 ccfc_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_k2 = {
+ 0, 2, ccfc_int0_k2_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_k2_regs[1] = {
+ &ccfc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ccfc_prty_attn_desc[10] = {
+ "ccfc_mem001_i_ecc_rf_int",
+ "ccfc_mem003_i_mem_prty",
+ "ccfc_mem007_i_mem_prty",
+ "ccfc_mem006_i_mem_prty",
+ "ccfc_ccam_par_err",
+ "ccfc_scam_par_err",
+ "ccfc_lc_que_ram_porta_lsb_par_err",
+ "ccfc_lc_que_ram_porta_msb_par_err",
+ "ccfc_lc_que_ram_portb_lsb_par_err",
+ "ccfc_lc_que_ram_portb_msb_par_err",
+};
+#else
+#define ccfc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ccfc_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg ccfc_prty1_bb_a0 = {
+ 0, 4, ccfc_prty1_bb_a0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_bb_a0_attn_idx[2] = {
+ 4, 5,
+};
+
+static struct attn_hw_reg ccfc_prty0_bb_a0 = {
+ 1, 2, ccfc_prty0_bb_a0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_bb_a0_regs[2] = {
+ &ccfc_prty1_bb_a0, &ccfc_prty0_bb_a0,
+};
+
+static const u16 ccfc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_prty1_bb_b0 = {
+ 0, 2, ccfc_prty1_bb_b0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_bb_b0_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ccfc_prty0_bb_b0 = {
+ 1, 6, ccfc_prty0_bb_b0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
+ &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0,
+};
+
+static const u16 ccfc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg ccfc_prty1_k2 = {
+ 0, 2, ccfc_prty1_k2_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_k2_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ccfc_prty0_k2 = {
+ 1, 6, ccfc_prty0_k2_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_k2_regs[2] = {
+ &ccfc_prty1_k2, &ccfc_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcfc_int_attn_desc[2] = {
+ "tcfc_address_error",
+ "tcfc_exe_error",
+};
+#else
+#define tcfc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcfc_int0_bb_a0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_bb_a0 = {
+ 0, 2, tcfc_int0_bb_a0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_bb_a0_regs[1] = {
+ &tcfc_int0_bb_a0,
+};
+
+static const u16 tcfc_int0_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_bb_b0 = {
+ 0, 2, tcfc_int0_bb_b0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
+ &tcfc_int0_bb_b0,
+};
+
+static const u16 tcfc_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_k2 = {
+ 0, 2, tcfc_int0_k2_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_k2_regs[1] = {
+ &tcfc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcfc_prty_attn_desc[10] = {
+ "tcfc_mem002_i_mem_prty",
+ "tcfc_mem001_i_mem_prty",
+ "tcfc_mem006_i_mem_prty",
+ "tcfc_mem005_i_mem_prty",
+ "tcfc_ccam_par_err",
+ "tcfc_scam_par_err",
+ "tcfc_lc_que_ram_porta_lsb_par_err",
+ "tcfc_lc_que_ram_porta_msb_par_err",
+ "tcfc_lc_que_ram_portb_lsb_par_err",
+ "tcfc_lc_que_ram_portb_msb_par_err",
+};
+#else
+#define tcfc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcfc_prty1_bb_a0_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg tcfc_prty1_bb_a0 = {
+ 0, 4, tcfc_prty1_bb_a0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_bb_a0_attn_idx[2] = {
+ 4, 5,
+};
+
+static struct attn_hw_reg tcfc_prty0_bb_a0 = {
+ 1, 2, tcfc_prty0_bb_a0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_bb_a0_regs[2] = {
+ &tcfc_prty1_bb_a0, &tcfc_prty0_bb_a0,
+};
+
+static const u16 tcfc_prty1_bb_b0_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_prty1_bb_b0 = {
+ 0, 2, tcfc_prty1_bb_b0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_bb_b0_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tcfc_prty0_bb_b0 = {
+ 1, 6, tcfc_prty0_bb_b0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
+ &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0,
+};
+
+static const u16 tcfc_prty1_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg tcfc_prty1_k2 = {
+ 0, 2, tcfc_prty1_k2_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_k2_attn_idx[6] = {
+ 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tcfc_prty0_k2 = {
+ 1, 6, tcfc_prty0_k2_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_k2_regs[2] = {
+ &tcfc_prty1_k2, &tcfc_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *igu_int_attn_desc[11] = {
+ "igu_address_error",
+ "igu_ctrl_fifo_error_err",
+ "igu_pxp_req_length_too_big",
+ "igu_host_tries2access_prod_upd",
+ "igu_vf_tries2acc_attn_cmd",
+ "igu_mme_bigger_then_5",
+ "igu_sb_index_is_not_valid",
+ "igu_durin_int_read_with_simd_dis",
+ "igu_cmd_fid_not_match",
+ "igu_segment_access_invalid",
+ "igu_attn_prod_acc",
+};
+#else
+#define igu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 igu_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_bb_a0 = {
+ 0, 11, igu_int0_bb_a0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_bb_a0_regs[1] = {
+ &igu_int0_bb_a0,
+};
+
+static const u16 igu_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_bb_b0 = {
+ 0, 11, igu_int0_bb_b0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
+ &igu_int0_bb_b0,
+};
+
+static const u16 igu_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_k2 = {
+ 0, 11, igu_int0_k2_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_k2_regs[1] = {
+ &igu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *igu_prty_attn_desc[42] = {
+ "igu_cam_parity",
+ "igu_mem009_i_ecc_rf_int",
+ "igu_mem015_i_mem_prty",
+ "igu_mem016_i_mem_prty",
+ "igu_mem017_i_mem_prty",
+ "igu_mem018_i_mem_prty",
+ "igu_mem019_i_mem_prty",
+ "igu_mem001_i_mem_prty",
+ "igu_mem002_i_mem_prty_0",
+ "igu_mem002_i_mem_prty_1",
+ "igu_mem004_i_mem_prty_0",
+ "igu_mem004_i_mem_prty_1",
+ "igu_mem004_i_mem_prty_2",
+ "igu_mem003_i_mem_prty",
+ "igu_mem005_i_mem_prty",
+ "igu_mem006_i_mem_prty_0",
+ "igu_mem006_i_mem_prty_1",
+ "igu_mem008_i_mem_prty_0",
+ "igu_mem008_i_mem_prty_1",
+ "igu_mem008_i_mem_prty_2",
+ "igu_mem007_i_mem_prty",
+ "igu_mem010_i_mem_prty_0",
+ "igu_mem010_i_mem_prty_1",
+ "igu_mem012_i_mem_prty_0",
+ "igu_mem012_i_mem_prty_1",
+ "igu_mem012_i_mem_prty_2",
+ "igu_mem011_i_mem_prty",
+ "igu_mem013_i_mem_prty",
+ "igu_mem014_i_mem_prty",
+ "igu_mem020_i_mem_prty",
+ "igu_mem003_i_mem_prty_0",
+ "igu_mem003_i_mem_prty_1",
+ "igu_mem003_i_mem_prty_2",
+ "igu_mem002_i_mem_prty",
+ "igu_mem007_i_mem_prty_0",
+ "igu_mem007_i_mem_prty_1",
+ "igu_mem007_i_mem_prty_2",
+ "igu_mem006_i_mem_prty",
+ "igu_mem010_i_mem_prty_2",
+ "igu_mem010_i_mem_prty_3",
+ "igu_mem013_i_mem_prty_0",
+ "igu_mem013_i_mem_prty_1",
+};
+#else
+#define igu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 igu_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_bb_a0 = {
+ 0, 1, igu_prty0_bb_a0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_bb_a0_attn_idx[31] = {
+ 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg igu_prty1_bb_a0 = {
+ 1, 31, igu_prty1_bb_a0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static const u16 igu_prty2_bb_a0_attn_idx[1] = {
+ 2,
+};
+
+static struct attn_hw_reg igu_prty2_bb_a0 = {
+ 2, 1, igu_prty2_bb_a0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214
+};
+
+static struct attn_hw_reg *igu_prty_bb_a0_regs[3] = {
+ &igu_prty0_bb_a0, &igu_prty1_bb_a0, &igu_prty2_bb_a0,
+};
+
+static const u16 igu_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_bb_b0 = {
+ 0, 1, igu_prty0_bb_b0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_bb_b0_attn_idx[31] = {
+ 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg igu_prty1_bb_b0 = {
+ 1, 31, igu_prty1_bb_b0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static const u16 igu_prty2_bb_b0_attn_idx[1] = {
+ 2,
+};
+
+static struct attn_hw_reg igu_prty2_bb_b0 = {
+ 2, 1, igu_prty2_bb_b0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214
+};
+
+static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
+ &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0,
+};
+
+static const u16 igu_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg igu_prty0_k2 = {
+ 0, 1, igu_prty0_k2_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_k2_attn_idx[28] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+};
+
+static struct attn_hw_reg igu_prty1_k2 = {
+ 1, 28, igu_prty1_k2_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static struct attn_hw_reg *igu_prty_k2_regs[2] = {
+ &igu_prty0_k2, &igu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cau_int_attn_desc[11] = {
+ "cau_address_error",
+ "cau_unauthorized_pxp_rd_cmd",
+ "cau_unauthorized_pxp_length_cmd",
+ "cau_pxp_sb_address_error",
+ "cau_pxp_pi_number_error",
+ "cau_cleanup_reg_sb_idx_error",
+ "cau_fsm_invalid_line",
+ "cau_cqe_fifo_err",
+ "cau_igu_wdata_fifo_err",
+ "cau_igu_req_fifo_err",
+ "cau_igu_cmd_fifo_err",
+};
+#else
+#define cau_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cau_int0_bb_a0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_bb_a0 = {
+ 0, 11, cau_int0_bb_a0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_bb_a0_regs[1] = {
+ &cau_int0_bb_a0,
+};
+
+static const u16 cau_int0_bb_b0_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_bb_b0 = {
+ 0, 11, cau_int0_bb_b0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
+ &cau_int0_bb_b0,
+};
+
+static const u16 cau_int0_k2_attn_idx[11] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_k2 = {
+ 0, 11, cau_int0_k2_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_k2_regs[1] = {
+ &cau_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cau_prty_attn_desc[15] = {
+ "cau_mem006_i_ecc_rf_int",
+ "cau_mem001_i_ecc_0_rf_int",
+ "cau_mem001_i_ecc_1_rf_int",
+ "cau_mem002_i_ecc_rf_int",
+ "cau_mem004_i_ecc_rf_int",
+ "cau_mem005_i_mem_prty",
+ "cau_mem007_i_mem_prty",
+ "cau_mem008_i_mem_prty",
+ "cau_mem009_i_mem_prty",
+ "cau_mem010_i_mem_prty",
+ "cau_mem011_i_mem_prty",
+ "cau_mem003_i_mem_prty_0",
+ "cau_mem003_i_mem_prty_1",
+ "cau_mem002_i_mem_prty",
+ "cau_mem004_i_mem_prty",
+};
+#else
+#define cau_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cau_prty1_bb_a0_attn_idx[13] = {
+ 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg cau_prty1_bb_a0 = {
+ 0, 13, cau_prty1_bb_a0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_bb_a0_regs[1] = {
+ &cau_prty1_bb_a0,
+};
+
+static const u16 cau_prty1_bb_b0_attn_idx[13] = {
+ 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg cau_prty1_bb_b0 = {
+ 0, 13, cau_prty1_bb_b0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
+ &cau_prty1_bb_b0,
+};
+
+static const u16 cau_prty1_k2_attn_idx[13] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg cau_prty1_k2 = {
+ 0, 13, cau_prty1_k2_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_k2_regs[1] = {
+ &cau_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *umac_int_attn_desc[2] = {
+ "umac_address_error",
+ "umac_tx_overflow",
+};
+#else
+#define umac_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 umac_int0_k2_attn_idx[2] = {
+ 0, 1,
+};
+
+static struct attn_hw_reg umac_int0_k2 = {
+ 0, 2, umac_int0_k2_attn_idx, 0x51180, 0x5118c, 0x51188, 0x51184
+};
+
+static struct attn_hw_reg *umac_int_k2_regs[1] = {
+ &umac_int0_k2,
+};
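+
+/*
+ * The UMAC block appears only in its K2 variant; no bb_a0 or bb_b0
+ * tables are generated for it.
+ */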
+
+#ifdef ATTN_DESC
+static const char *dbg_int_attn_desc[1] = {
+ "dbg_address_error",
+};
+#else
+#define dbg_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dbg_int0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_bb_a0 = {
+ 0, 1, dbg_int0_bb_a0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_bb_a0_regs[1] = {
+ &dbg_int0_bb_a0,
+};
+
+static const u16 dbg_int0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_bb_b0 = {
+ 0, 1, dbg_int0_bb_b0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
+ &dbg_int0_bb_b0,
+};
+
+static const u16 dbg_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_int0_k2 = {
+ 0, 1, dbg_int0_k2_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_k2_regs[1] = {
+ &dbg_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dbg_prty_attn_desc[1] = {
+ "dbg_mem001_i_mem_prty",
+};
+#else
+#define dbg_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dbg_prty1_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_bb_a0 = {
+ 0, 1, dbg_prty1_bb_a0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_bb_a0_regs[1] = {
+ &dbg_prty1_bb_a0,
+};
+
+static const u16 dbg_prty1_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_bb_b0 = {
+ 0, 1, dbg_prty1_bb_b0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
+ &dbg_prty1_bb_b0,
+};
+
+static const u16 dbg_prty1_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg dbg_prty1_k2 = {
+ 0, 1, dbg_prty1_k2_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_k2_regs[1] = {
+ &dbg_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nig_int_attn_desc[196] = {
+ "nig_address_error",
+ "nig_debug_fifo_error",
+ "nig_dorq_fifo_error",
+ "nig_dbg_syncfifo_error_wr",
+ "nig_dorq_syncfifo_error_wr",
+ "nig_storm_syncfifo_error_wr",
+ "nig_dbgmux_syncfifo_error_wr",
+ "nig_msdm_syncfifo_error_wr",
+ "nig_tsdm_syncfifo_error_wr",
+ "nig_usdm_syncfifo_error_wr",
+ "nig_xsdm_syncfifo_error_wr",
+ "nig_ysdm_syncfifo_error_wr",
+ "nig_tx_sopq0_error",
+ "nig_tx_sopq1_error",
+ "nig_tx_sopq2_error",
+ "nig_tx_sopq3_error",
+ "nig_tx_sopq4_error",
+ "nig_tx_sopq5_error",
+ "nig_tx_sopq6_error",
+ "nig_tx_sopq7_error",
+ "nig_tx_sopq8_error",
+ "nig_tx_sopq9_error",
+ "nig_tx_sopq10_error",
+ "nig_tx_sopq11_error",
+ "nig_tx_sopq12_error",
+ "nig_tx_sopq13_error",
+ "nig_tx_sopq14_error",
+ "nig_tx_sopq15_error",
+ "nig_lb_sopq0_error",
+ "nig_lb_sopq1_error",
+ "nig_lb_sopq2_error",
+ "nig_lb_sopq3_error",
+ "nig_lb_sopq4_error",
+ "nig_lb_sopq5_error",
+ "nig_lb_sopq6_error",
+ "nig_lb_sopq7_error",
+ "nig_lb_sopq8_error",
+ "nig_lb_sopq9_error",
+ "nig_lb_sopq10_error",
+ "nig_lb_sopq11_error",
+ "nig_lb_sopq12_error",
+ "nig_lb_sopq13_error",
+ "nig_lb_sopq14_error",
+ "nig_lb_sopq15_error",
+ "nig_p0_purelb_sopq_error",
+ "nig_p0_rx_macfifo_error",
+ "nig_p0_tx_macfifo_error",
+ "nig_p0_tx_bmb_fifo_error",
+ "nig_p0_lb_bmb_fifo_error",
+ "nig_p0_tx_btb_fifo_error",
+ "nig_p0_lb_btb_fifo_error",
+ "nig_p0_rx_llh_dfifo_error",
+ "nig_p0_tx_llh_dfifo_error",
+ "nig_p0_lb_llh_dfifo_error",
+ "nig_p0_rx_llh_hfifo_error",
+ "nig_p0_tx_llh_hfifo_error",
+ "nig_p0_lb_llh_hfifo_error",
+ "nig_p0_rx_llh_rfifo_error",
+ "nig_p0_tx_llh_rfifo_error",
+ "nig_p0_lb_llh_rfifo_error",
+ "nig_p0_storm_fifo_error",
+ "nig_p0_storm_dscr_fifo_error",
+ "nig_p0_tx_gnt_fifo_error",
+ "nig_p0_lb_gnt_fifo_error",
+ "nig_p0_tx_pause_too_long_int",
+ "nig_p0_tc0_pause_too_long_int",
+ "nig_p0_tc1_pause_too_long_int",
+ "nig_p0_tc2_pause_too_long_int",
+ "nig_p0_tc3_pause_too_long_int",
+ "nig_p0_tc4_pause_too_long_int",
+ "nig_p0_tc5_pause_too_long_int",
+ "nig_p0_tc6_pause_too_long_int",
+ "nig_p0_tc7_pause_too_long_int",
+ "nig_p0_lb_tc0_pause_too_long_int",
+ "nig_p0_lb_tc1_pause_too_long_int",
+ "nig_p0_lb_tc2_pause_too_long_int",
+ "nig_p0_lb_tc3_pause_too_long_int",
+ "nig_p0_lb_tc4_pause_too_long_int",
+ "nig_p0_lb_tc5_pause_too_long_int",
+ "nig_p0_lb_tc6_pause_too_long_int",
+ "nig_p0_lb_tc7_pause_too_long_int",
+ "nig_p0_lb_tc8_pause_too_long_int",
+ "nig_p1_purelb_sopq_error",
+ "nig_p1_rx_macfifo_error",
+ "nig_p1_tx_macfifo_error",
+ "nig_p1_tx_bmb_fifo_error",
+ "nig_p1_lb_bmb_fifo_error",
+ "nig_p1_tx_btb_fifo_error",
+ "nig_p1_lb_btb_fifo_error",
+ "nig_p1_rx_llh_dfifo_error",
+ "nig_p1_tx_llh_dfifo_error",
+ "nig_p1_lb_llh_dfifo_error",
+ "nig_p1_rx_llh_hfifo_error",
+ "nig_p1_tx_llh_hfifo_error",
+ "nig_p1_lb_llh_hfifo_error",
+ "nig_p1_rx_llh_rfifo_error",
+ "nig_p1_tx_llh_rfifo_error",
+ "nig_p1_lb_llh_rfifo_error",
+ "nig_p1_storm_fifo_error",
+ "nig_p1_storm_dscr_fifo_error",
+ "nig_p1_tx_gnt_fifo_error",
+ "nig_p1_lb_gnt_fifo_error",
+ "nig_p1_tx_pause_too_long_int",
+ "nig_p1_tc0_pause_too_long_int",
+ "nig_p1_tc1_pause_too_long_int",
+ "nig_p1_tc2_pause_too_long_int",
+ "nig_p1_tc3_pause_too_long_int",
+ "nig_p1_tc4_pause_too_long_int",
+ "nig_p1_tc5_pause_too_long_int",
+ "nig_p1_tc6_pause_too_long_int",
+ "nig_p1_tc7_pause_too_long_int",
+ "nig_p1_lb_tc0_pause_too_long_int",
+ "nig_p1_lb_tc1_pause_too_long_int",
+ "nig_p1_lb_tc2_pause_too_long_int",
+ "nig_p1_lb_tc3_pause_too_long_int",
+ "nig_p1_lb_tc4_pause_too_long_int",
+ "nig_p1_lb_tc5_pause_too_long_int",
+ "nig_p1_lb_tc6_pause_too_long_int",
+ "nig_p1_lb_tc7_pause_too_long_int",
+ "nig_p1_lb_tc8_pause_too_long_int",
+ "nig_p2_purelb_sopq_error",
+ "nig_p2_rx_macfifo_error",
+ "nig_p2_tx_macfifo_error",
+ "nig_p2_tx_bmb_fifo_error",
+ "nig_p2_lb_bmb_fifo_error",
+ "nig_p2_tx_btb_fifo_error",
+ "nig_p2_lb_btb_fifo_error",
+ "nig_p2_rx_llh_dfifo_error",
+ "nig_p2_tx_llh_dfifo_error",
+ "nig_p2_lb_llh_dfifo_error",
+ "nig_p2_rx_llh_hfifo_error",
+ "nig_p2_tx_llh_hfifo_error",
+ "nig_p2_lb_llh_hfifo_error",
+ "nig_p2_rx_llh_rfifo_error",
+ "nig_p2_tx_llh_rfifo_error",
+ "nig_p2_lb_llh_rfifo_error",
+ "nig_p2_storm_fifo_error",
+ "nig_p2_storm_dscr_fifo_error",
+ "nig_p2_tx_gnt_fifo_error",
+ "nig_p2_lb_gnt_fifo_error",
+ "nig_p2_tx_pause_too_long_int",
+ "nig_p2_tc0_pause_too_long_int",
+ "nig_p2_tc1_pause_too_long_int",
+ "nig_p2_tc2_pause_too_long_int",
+ "nig_p2_tc3_pause_too_long_int",
+ "nig_p2_tc4_pause_too_long_int",
+ "nig_p2_tc5_pause_too_long_int",
+ "nig_p2_tc6_pause_too_long_int",
+ "nig_p2_tc7_pause_too_long_int",
+ "nig_p2_lb_tc0_pause_too_long_int",
+ "nig_p2_lb_tc1_pause_too_long_int",
+ "nig_p2_lb_tc2_pause_too_long_int",
+ "nig_p2_lb_tc3_pause_too_long_int",
+ "nig_p2_lb_tc4_pause_too_long_int",
+ "nig_p2_lb_tc5_pause_too_long_int",
+ "nig_p2_lb_tc6_pause_too_long_int",
+ "nig_p2_lb_tc7_pause_too_long_int",
+ "nig_p2_lb_tc8_pause_too_long_int",
+ "nig_p3_purelb_sopq_error",
+ "nig_p3_rx_macfifo_error",
+ "nig_p3_tx_macfifo_error",
+ "nig_p3_tx_bmb_fifo_error",
+ "nig_p3_lb_bmb_fifo_error",
+ "nig_p3_tx_btb_fifo_error",
+ "nig_p3_lb_btb_fifo_error",
+ "nig_p3_rx_llh_dfifo_error",
+ "nig_p3_tx_llh_dfifo_error",
+ "nig_p3_lb_llh_dfifo_error",
+ "nig_p3_rx_llh_hfifo_error",
+ "nig_p3_tx_llh_hfifo_error",
+ "nig_p3_lb_llh_hfifo_error",
+ "nig_p3_rx_llh_rfifo_error",
+ "nig_p3_tx_llh_rfifo_error",
+ "nig_p3_lb_llh_rfifo_error",
+ "nig_p3_storm_fifo_error",
+ "nig_p3_storm_dscr_fifo_error",
+ "nig_p3_tx_gnt_fifo_error",
+ "nig_p3_lb_gnt_fifo_error",
+ "nig_p3_tx_pause_too_long_int",
+ "nig_p3_tc0_pause_too_long_int",
+ "nig_p3_tc1_pause_too_long_int",
+ "nig_p3_tc2_pause_too_long_int",
+ "nig_p3_tc3_pause_too_long_int",
+ "nig_p3_tc4_pause_too_long_int",
+ "nig_p3_tc5_pause_too_long_int",
+ "nig_p3_tc6_pause_too_long_int",
+ "nig_p3_tc7_pause_too_long_int",
+ "nig_p3_lb_tc0_pause_too_long_int",
+ "nig_p3_lb_tc1_pause_too_long_int",
+ "nig_p3_lb_tc2_pause_too_long_int",
+ "nig_p3_lb_tc3_pause_too_long_int",
+ "nig_p3_lb_tc4_pause_too_long_int",
+ "nig_p3_lb_tc5_pause_too_long_int",
+ "nig_p3_lb_tc6_pause_too_long_int",
+ "nig_p3_lb_tc7_pause_too_long_int",
+ "nig_p3_lb_tc8_pause_too_long_int",
+};
+#else
+#define nig_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nig_int0_bb_a0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_bb_a0 = {
+ 0, 12, nig_int0_bb_a0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_bb_a0_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_bb_a0 = {
+ 1, 32, nig_int1_bb_a0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_bb_a0_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_bb_a0 = {
+ 2, 20, nig_int2_bb_a0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_bb_a0_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_bb_a0 = {
+ 3, 18, nig_int3_bb_a0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_bb_a0_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_bb_a0 = {
+ 4, 20, nig_int4_bb_a0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_bb_a0_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_bb_a0 = {
+ 5, 18, nig_int5_bb_a0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static struct attn_hw_reg *nig_int_bb_a0_regs[6] = {
+ &nig_int0_bb_a0, &nig_int1_bb_a0, &nig_int2_bb_a0, &nig_int3_bb_a0,
+ &nig_int4_bb_a0, &nig_int5_bb_a0,
+};
+
+static const u16 nig_int0_bb_b0_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_bb_b0 = {
+ 0, 12, nig_int0_bb_b0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_bb_b0_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_bb_b0 = {
+ 1, 32, nig_int1_bb_b0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_bb_b0_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_bb_b0 = {
+ 2, 20, nig_int2_bb_b0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_bb_b0_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_bb_b0 = {
+ 3, 18, nig_int3_bb_b0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_bb_b0_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_bb_b0 = {
+ 4, 20, nig_int4_bb_b0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_bb_b0_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_bb_b0 = {
+ 5, 18, nig_int5_bb_b0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
+ &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
+ &nig_int4_bb_b0, &nig_int5_bb_b0,
+};
+
+static const u16 nig_int0_k2_attn_idx[12] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_k2 = {
+ 0, 12, nig_int0_k2_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_k2_attn_idx[32] = {
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_k2 = {
+ 1, 32, nig_int1_k2_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_k2_attn_idx[20] = {
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63,
+};
+
+static struct attn_hw_reg nig_int2_k2 = {
+ 2, 20, nig_int2_k2_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_k2_attn_idx[18] = {
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_k2 = {
+ 3, 18, nig_int3_k2_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_k2_attn_idx[20] = {
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ 100, 101,
+};
+
+static struct attn_hw_reg nig_int4_k2 = {
+ 4, 20, nig_int4_k2_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_k2_attn_idx[18] = {
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_k2 = {
+ 5, 18, nig_int5_k2_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static const u16 nig_int6_k2_attn_idx[20] = {
+ 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+ 134, 135, 136, 137, 138, 139,
+};
+
+static struct attn_hw_reg nig_int6_k2 = {
+ 6, 20, nig_int6_k2_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4
+};
+
+static const u16 nig_int7_k2_attn_idx[18] = {
+ 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157,
+};
+
+static struct attn_hw_reg nig_int7_k2 = {
+ 7, 18, nig_int7_k2_attn_idx, 0x5000b0, 0x5000bc, 0x5000b8, 0x5000b4
+};
+
+static const u16 nig_int8_k2_attn_idx[20] = {
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 174, 175, 176, 177,
+};
+
+static struct attn_hw_reg nig_int8_k2 = {
+ 8, 20, nig_int8_k2_attn_idx, 0x5000c0, 0x5000cc, 0x5000c8, 0x5000c4
+};
+
+static const u16 nig_int9_k2_attn_idx[18] = {
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195,
+};
+
+static struct attn_hw_reg nig_int9_k2 = {
+ 9, 18, nig_int9_k2_attn_idx, 0x5000d0, 0x5000dc, 0x5000d8, 0x5000d4
+};
+
+static struct attn_hw_reg *nig_int_k2_regs[10] = {
+ &nig_int0_k2, &nig_int1_k2, &nig_int2_k2, &nig_int3_k2, &nig_int4_k2,
+ &nig_int5_k2, &nig_int6_k2, &nig_int7_k2, &nig_int8_k2, &nig_int9_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nig_prty_attn_desc[113] = {
+ "nig_datapath_parity_error",
+ "nig_mem107_i_mem_prty",
+ "nig_mem103_i_mem_prty",
+ "nig_mem104_i_mem_prty",
+ "nig_mem105_i_mem_prty",
+ "nig_mem106_i_mem_prty",
+ "nig_mem072_i_mem_prty",
+ "nig_mem071_i_mem_prty",
+ "nig_mem074_i_mem_prty",
+ "nig_mem073_i_mem_prty",
+ "nig_mem076_i_mem_prty",
+ "nig_mem075_i_mem_prty",
+ "nig_mem078_i_mem_prty",
+ "nig_mem077_i_mem_prty",
+ "nig_mem055_i_mem_prty",
+ "nig_mem062_i_mem_prty",
+ "nig_mem063_i_mem_prty",
+ "nig_mem064_i_mem_prty",
+ "nig_mem065_i_mem_prty",
+ "nig_mem066_i_mem_prty",
+ "nig_mem067_i_mem_prty",
+ "nig_mem068_i_mem_prty",
+ "nig_mem069_i_mem_prty",
+ "nig_mem070_i_mem_prty",
+ "nig_mem056_i_mem_prty",
+ "nig_mem057_i_mem_prty",
+ "nig_mem058_i_mem_prty",
+ "nig_mem059_i_mem_prty",
+ "nig_mem060_i_mem_prty",
+ "nig_mem061_i_mem_prty",
+ "nig_mem035_i_mem_prty",
+ "nig_mem046_i_mem_prty",
+ "nig_mem051_i_mem_prty",
+ "nig_mem052_i_mem_prty",
+ "nig_mem090_i_mem_prty",
+ "nig_mem089_i_mem_prty",
+ "nig_mem092_i_mem_prty",
+ "nig_mem091_i_mem_prty",
+ "nig_mem109_i_mem_prty",
+ "nig_mem110_i_mem_prty",
+ "nig_mem001_i_mem_prty",
+ "nig_mem008_i_mem_prty",
+ "nig_mem009_i_mem_prty",
+ "nig_mem010_i_mem_prty",
+ "nig_mem011_i_mem_prty",
+ "nig_mem012_i_mem_prty",
+ "nig_mem013_i_mem_prty",
+ "nig_mem014_i_mem_prty",
+ "nig_mem015_i_mem_prty",
+ "nig_mem016_i_mem_prty",
+ "nig_mem002_i_mem_prty",
+ "nig_mem003_i_mem_prty",
+ "nig_mem004_i_mem_prty",
+ "nig_mem005_i_mem_prty",
+ "nig_mem006_i_mem_prty",
+ "nig_mem007_i_mem_prty",
+ "nig_mem080_i_mem_prty",
+ "nig_mem081_i_mem_prty",
+ "nig_mem082_i_mem_prty",
+ "nig_mem083_i_mem_prty",
+ "nig_mem048_i_mem_prty",
+ "nig_mem049_i_mem_prty",
+ "nig_mem102_i_mem_prty",
+ "nig_mem087_i_mem_prty",
+ "nig_mem086_i_mem_prty",
+ "nig_mem088_i_mem_prty",
+ "nig_mem079_i_mem_prty",
+ "nig_mem047_i_mem_prty",
+ "nig_mem050_i_mem_prty",
+ "nig_mem053_i_mem_prty",
+ "nig_mem054_i_mem_prty",
+ "nig_mem036_i_mem_prty",
+ "nig_mem037_i_mem_prty",
+ "nig_mem038_i_mem_prty",
+ "nig_mem039_i_mem_prty",
+ "nig_mem040_i_mem_prty",
+ "nig_mem041_i_mem_prty",
+ "nig_mem042_i_mem_prty",
+ "nig_mem043_i_mem_prty",
+ "nig_mem044_i_mem_prty",
+ "nig_mem045_i_mem_prty",
+ "nig_mem093_i_mem_prty",
+ "nig_mem094_i_mem_prty",
+ "nig_mem027_i_mem_prty",
+ "nig_mem028_i_mem_prty",
+ "nig_mem029_i_mem_prty",
+ "nig_mem030_i_mem_prty",
+ "nig_mem017_i_mem_prty",
+ "nig_mem018_i_mem_prty",
+ "nig_mem095_i_mem_prty",
+ "nig_mem084_i_mem_prty",
+ "nig_mem085_i_mem_prty",
+ "nig_mem099_i_mem_prty",
+ "nig_mem100_i_mem_prty",
+ "nig_mem096_i_mem_prty",
+ "nig_mem097_i_mem_prty",
+ "nig_mem098_i_mem_prty",
+ "nig_mem031_i_mem_prty",
+ "nig_mem032_i_mem_prty",
+ "nig_mem033_i_mem_prty",
+ "nig_mem034_i_mem_prty",
+ "nig_mem019_i_mem_prty",
+ "nig_mem020_i_mem_prty",
+ "nig_mem021_i_mem_prty",
+ "nig_mem022_i_mem_prty",
+ "nig_mem101_i_mem_prty",
+ "nig_mem023_i_mem_prty",
+ "nig_mem024_i_mem_prty",
+ "nig_mem025_i_mem_prty",
+ "nig_mem026_i_mem_prty",
+ "nig_mem108_i_mem_prty",
+ "nig_mem031_ext_i_mem_prty",
+ "nig_mem034_ext_i_mem_prty",
+};
+#else
+#define nig_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nig_prty1_bb_a0_attn_idx[31] = {
+ 1, 2, 5, 12, 13, 23, 35, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 60, 61, 62, 63, 64, 65, 66,
+};
+
+static struct attn_hw_reg nig_prty1_bb_a0 = {
+ 0, 31, nig_prty1_bb_a0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_bb_a0_attn_idx[31] = {
+ 33, 69, 70, 90, 91, 8, 11, 10, 14, 17, 18, 19, 20, 21, 22, 7, 6, 24, 25,
+ 26, 27, 28, 29, 15, 16, 57, 58, 59, 9, 94, 95,
+};
+
+static struct attn_hw_reg nig_prty2_bb_a0 = {
+ 1, 31, nig_prty2_bb_a0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_bb_a0_attn_idx[31] = {
+ 96, 97, 98, 103, 104, 92, 93, 105, 106, 107, 108, 109, 80, 31, 67, 83,
+ 84, 3, 68, 85, 86, 89, 77, 78, 79, 4, 32, 36, 81, 82, 87,
+};
+
+static struct attn_hw_reg nig_prty3_bb_a0 = {
+ 2, 31, nig_prty3_bb_a0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_bb_a0_attn_idx[14] = {
+ 88, 101, 102, 75, 71, 74, 76, 73, 72, 34, 37, 99, 30, 100,
+};
+
+static struct attn_hw_reg nig_prty4_bb_a0 = {
+ 3, 14, nig_prty4_bb_a0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_bb_a0_regs[4] = {
+ &nig_prty1_bb_a0, &nig_prty2_bb_a0, &nig_prty3_bb_a0, &nig_prty4_bb_a0,
+};
+
+static const u16 nig_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg nig_prty0_bb_b0 = {
+ 0, 1, nig_prty0_bb_b0_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4
+};
+
+static const u16 nig_prty1_bb_b0_attn_idx[31] = {
+ 4, 5, 9, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+};
+
+static struct attn_hw_reg nig_prty1_bb_b0 = {
+ 1, 31, nig_prty1_bb_b0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_bb_b0_attn_idx[31] = {
+ 90, 91, 64, 63, 65, 8, 11, 10, 13, 12, 66, 14, 17, 18, 19, 20, 21, 22,
+ 23, 7, 6, 24, 25, 26, 27, 28, 29, 15, 16, 92, 93,
+};
+
+static struct attn_hw_reg nig_prty2_bb_b0 = {
+ 2, 31, nig_prty2_bb_b0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_bb_b0_attn_idx[31] = {
+ 94, 95, 96, 97, 99, 100, 103, 104, 105, 62, 108, 109, 80, 31, 1, 67, 60,
+ 69, 83, 84, 2, 3, 110, 61, 68, 70, 85, 86, 111, 112, 89,
+};
+
+static struct attn_hw_reg nig_prty3_bb_b0 = {
+ 3, 31, nig_prty3_bb_b0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_bb_b0_attn_idx[17] = {
+ 106, 107, 87, 88, 81, 82, 101, 102, 75, 71, 74, 76, 77, 78, 79, 73, 72,
+};
+
+static struct attn_hw_reg nig_prty4_bb_b0 = {
+ 4, 17, nig_prty4_bb_b0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
+ &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, &nig_prty3_bb_b0,
+ &nig_prty4_bb_b0,
+};
+
+static const u16 nig_prty0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg nig_prty0_k2 = {
+ 0, 1, nig_prty0_k2_attn_idx, 0x5000e0, 0x5000ec, 0x5000e8, 0x5000e4
+};
+
+static const u16 nig_prty1_k2_attn_idx[31] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg nig_prty1_k2 = {
+ 1, 31, nig_prty1_k2_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_k2_attn_idx[31] = {
+ 67, 60, 61, 68, 32, 33, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 37, 36, 81, 82, 83, 84, 85, 86, 48, 49, 87, 88, 89,
+};
+
+static struct attn_hw_reg nig_prty2_k2 = {
+ 2, 31, nig_prty2_k2_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_k2_attn_idx[31] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 92, 93, 105, 62, 106,
+ 107, 108, 109, 59, 90, 91, 64, 55, 41, 42, 43, 63, 65, 35, 34,
+};
+
+static struct attn_hw_reg nig_prty3_k2 = {
+ 3, 31, nig_prty3_k2_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_k2_attn_idx[14] = {
+ 44, 45, 46, 47, 40, 50, 66, 56, 57, 58, 51, 52, 53, 54,
+};
+
+static struct attn_hw_reg nig_prty4_k2 = {
+ 4, 14, nig_prty4_k2_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_k2_regs[5] = {
+ &nig_prty0_k2, &nig_prty1_k2, &nig_prty2_k2, &nig_prty3_k2,
+ &nig_prty4_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *wol_int_attn_desc[1] = {
+ "wol_address_error",
+};
+#else
+#define wol_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 wol_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg wol_int0_k2 = {
+ 0, 1, wol_int0_k2_attn_idx, 0x600040, 0x60004c, 0x600048, 0x600044
+};
+
+static struct attn_hw_reg *wol_int_k2_regs[1] = {
+ &wol_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *wol_prty_attn_desc[24] = {
+ "wol_mem017_i_mem_prty",
+ "wol_mem018_i_mem_prty",
+ "wol_mem019_i_mem_prty",
+ "wol_mem020_i_mem_prty",
+ "wol_mem021_i_mem_prty",
+ "wol_mem022_i_mem_prty",
+ "wol_mem023_i_mem_prty",
+ "wol_mem024_i_mem_prty",
+ "wol_mem001_i_mem_prty",
+ "wol_mem008_i_mem_prty",
+ "wol_mem009_i_mem_prty",
+ "wol_mem010_i_mem_prty",
+ "wol_mem011_i_mem_prty",
+ "wol_mem012_i_mem_prty",
+ "wol_mem013_i_mem_prty",
+ "wol_mem014_i_mem_prty",
+ "wol_mem015_i_mem_prty",
+ "wol_mem016_i_mem_prty",
+ "wol_mem002_i_mem_prty",
+ "wol_mem003_i_mem_prty",
+ "wol_mem004_i_mem_prty",
+ "wol_mem005_i_mem_prty",
+ "wol_mem006_i_mem_prty",
+ "wol_mem007_i_mem_prty",
+};
+#else
+#define wol_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 wol_prty1_k2_attn_idx[24] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23,
+};
+
+static struct attn_hw_reg wol_prty1_k2 = {
+ 0, 24, wol_prty1_k2_attn_idx, 0x600200, 0x60020c, 0x600208, 0x600204
+};
+
+static struct attn_hw_reg *wol_prty_k2_regs[1] = {
+ &wol_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmbn_int_attn_desc[1] = {
+ "bmbn_address_error",
+};
+#else
+#define bmbn_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmbn_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg bmbn_int0_k2 = {
+ 0, 1, bmbn_int0_k2_attn_idx, 0x610040, 0x61004c, 0x610048, 0x610044
+};
+
+static struct attn_hw_reg *bmbn_int_k2_regs[1] = {
+ &bmbn_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ipc_int_attn_desc[14] = {
+ "ipc_address_error",
+ "ipc_unused_0",
+ "ipc_vmain_por_assert",
+ "ipc_vmain_por_deassert",
+ "ipc_perst_assert",
+ "ipc_perst_deassert",
+ "ipc_otp_ecc_ded_0",
+ "ipc_otp_ecc_ded_1",
+ "ipc_otp_ecc_ded_2",
+ "ipc_otp_ecc_ded_3",
+ "ipc_otp_ecc_ded_4",
+ "ipc_otp_ecc_ded_5",
+ "ipc_otp_ecc_ded_6",
+ "ipc_otp_ecc_ded_7",
+};
+#else
+#define ipc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ipc_int0_bb_a0_attn_idx[5] = {
+ 0, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg ipc_int0_bb_a0 = {
+ 0, 5, ipc_int0_bb_a0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510
+};
+
+static struct attn_hw_reg *ipc_int_bb_a0_regs[1] = {
+ &ipc_int0_bb_a0,
+};
+
+static const u16 ipc_int0_bb_b0_attn_idx[13] = {
+ 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg ipc_int0_bb_b0 = {
+ 0, 13, ipc_int0_bb_b0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510
+};
+
+static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
+ &ipc_int0_bb_b0,
+};
+
+static const u16 ipc_int0_k2_attn_idx[5] = {
+ 0, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg ipc_int0_k2 = {
+ 0, 5, ipc_int0_k2_attn_idx, 0x202dc, 0x202e8, 0x202e4, 0x202e0
+};
+
+static struct attn_hw_reg *ipc_int_k2_regs[1] = {
+ &ipc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ipc_prty_attn_desc[1] = {
+ "ipc_fake_par_err",
+};
+#else
+#define ipc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ipc_prty0_bb_a0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ipc_prty0_bb_a0 = {
+ 0, 1, ipc_prty0_bb_a0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520
+};
+
+static struct attn_hw_reg *ipc_prty_bb_a0_regs[1] = {
+ &ipc_prty0_bb_a0,
+};
+
+static const u16 ipc_prty0_bb_b0_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ipc_prty0_bb_b0 = {
+ 0, 1, ipc_prty0_bb_b0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520
+};
+
+static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
+ &ipc_prty0_bb_b0,
+};
+
+#ifdef ATTN_DESC
+static const char *nwm_int_attn_desc[18] = {
+ "nwm_address_error",
+ "nwm_tx_overflow_0",
+ "nwm_tx_underflow_0",
+ "nwm_tx_overflow_1",
+ "nwm_tx_underflow_1",
+ "nwm_tx_overflow_2",
+ "nwm_tx_underflow_2",
+ "nwm_tx_overflow_3",
+ "nwm_tx_underflow_3",
+ "nwm_unused_0",
+ "nwm_ln0_at_10M",
+ "nwm_ln0_at_100M",
+ "nwm_ln1_at_10M",
+ "nwm_ln1_at_100M",
+ "nwm_ln2_at_10M",
+ "nwm_ln2_at_100M",
+ "nwm_ln3_at_10M",
+ "nwm_ln3_at_100M",
+};
+#else
+#define nwm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nwm_int0_k2_attn_idx[17] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg nwm_int0_k2 = {
+ 0, 17, nwm_int0_k2_attn_idx, 0x800004, 0x800010, 0x80000c, 0x800008
+};
+
+static struct attn_hw_reg *nwm_int_k2_regs[1] = {
+ &nwm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nwm_prty_attn_desc[72] = {
+ "nwm_mem020_i_mem_prty",
+ "nwm_mem028_i_mem_prty",
+ "nwm_mem036_i_mem_prty",
+ "nwm_mem044_i_mem_prty",
+ "nwm_mem023_i_mem_prty",
+ "nwm_mem031_i_mem_prty",
+ "nwm_mem039_i_mem_prty",
+ "nwm_mem047_i_mem_prty",
+ "nwm_mem024_i_mem_prty",
+ "nwm_mem032_i_mem_prty",
+ "nwm_mem040_i_mem_prty",
+ "nwm_mem048_i_mem_prty",
+ "nwm_mem018_i_mem_prty",
+ "nwm_mem026_i_mem_prty",
+ "nwm_mem034_i_mem_prty",
+ "nwm_mem042_i_mem_prty",
+ "nwm_mem017_i_mem_prty",
+ "nwm_mem025_i_mem_prty",
+ "nwm_mem033_i_mem_prty",
+ "nwm_mem041_i_mem_prty",
+ "nwm_mem021_i_mem_prty",
+ "nwm_mem029_i_mem_prty",
+ "nwm_mem037_i_mem_prty",
+ "nwm_mem045_i_mem_prty",
+ "nwm_mem019_i_mem_prty",
+ "nwm_mem027_i_mem_prty",
+ "nwm_mem035_i_mem_prty",
+ "nwm_mem043_i_mem_prty",
+ "nwm_mem022_i_mem_prty",
+ "nwm_mem030_i_mem_prty",
+ "nwm_mem038_i_mem_prty",
+ "nwm_mem046_i_mem_prty",
+ "nwm_mem057_i_mem_prty",
+ "nwm_mem059_i_mem_prty",
+ "nwm_mem061_i_mem_prty",
+ "nwm_mem063_i_mem_prty",
+ "nwm_mem058_i_mem_prty",
+ "nwm_mem060_i_mem_prty",
+ "nwm_mem062_i_mem_prty",
+ "nwm_mem064_i_mem_prty",
+ "nwm_mem009_i_mem_prty",
+ "nwm_mem010_i_mem_prty",
+ "nwm_mem011_i_mem_prty",
+ "nwm_mem012_i_mem_prty",
+ "nwm_mem013_i_mem_prty",
+ "nwm_mem014_i_mem_prty",
+ "nwm_mem015_i_mem_prty",
+ "nwm_mem016_i_mem_prty",
+ "nwm_mem001_i_mem_prty",
+ "nwm_mem002_i_mem_prty",
+ "nwm_mem003_i_mem_prty",
+ "nwm_mem004_i_mem_prty",
+ "nwm_mem005_i_mem_prty",
+ "nwm_mem006_i_mem_prty",
+ "nwm_mem007_i_mem_prty",
+ "nwm_mem008_i_mem_prty",
+ "nwm_mem049_i_mem_prty",
+ "nwm_mem053_i_mem_prty",
+ "nwm_mem050_i_mem_prty",
+ "nwm_mem054_i_mem_prty",
+ "nwm_mem051_i_mem_prty",
+ "nwm_mem055_i_mem_prty",
+ "nwm_mem052_i_mem_prty",
+ "nwm_mem056_i_mem_prty",
+ "nwm_mem066_i_mem_prty",
+ "nwm_mem068_i_mem_prty",
+ "nwm_mem070_i_mem_prty",
+ "nwm_mem072_i_mem_prty",
+ "nwm_mem065_i_mem_prty",
+ "nwm_mem067_i_mem_prty",
+ "nwm_mem069_i_mem_prty",
+ "nwm_mem071_i_mem_prty",
+};
+#else
+#define nwm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nwm_prty1_k2_attn_idx[31] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg nwm_prty1_k2 = {
+ 0, 31, nwm_prty1_k2_attn_idx, 0x800200, 0x80020c, 0x800208, 0x800204
+};
+
+static const u16 nwm_prty2_k2_attn_idx[31] = {
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+};
+
+static struct attn_hw_reg nwm_prty2_k2 = {
+ 1, 31, nwm_prty2_k2_attn_idx, 0x800210, 0x80021c, 0x800218, 0x800214
+};
+
+static const u16 nwm_prty3_k2_attn_idx[10] = {
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+};
+
+static struct attn_hw_reg nwm_prty3_k2 = {
+ 2, 10, nwm_prty3_k2_attn_idx, 0x800220, 0x80022c, 0x800228, 0x800224
+};
+
+static struct attn_hw_reg *nwm_prty_k2_regs[3] = {
+ &nwm_prty1_k2, &nwm_prty2_k2, &nwm_prty3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nws_int_attn_desc[38] = {
+ "nws_address_error",
+ "nws_ln0_an_resolve_50g_cr2",
+ "nws_ln0_an_resolve_50g_kr2",
+ "nws_ln0_an_resolve_40g_cr4",
+ "nws_ln0_an_resolve_40g_kr4",
+ "nws_ln0_an_resolve_25g_gr",
+ "nws_ln0_an_resolve_25g_cr",
+ "nws_ln0_an_resolve_25g_kr",
+ "nws_ln0_an_resolve_10g_kr",
+ "nws_ln0_an_resolve_1g_kx",
+ "nws_unused_0",
+ "nws_ln1_an_resolve_50g_cr2",
+ "nws_ln1_an_resolve_50g_kr2",
+ "nws_ln1_an_resolve_40g_cr4",
+ "nws_ln1_an_resolve_40g_kr4",
+ "nws_ln1_an_resolve_25g_gr",
+ "nws_ln1_an_resolve_25g_cr",
+ "nws_ln1_an_resolve_25g_kr",
+ "nws_ln1_an_resolve_10g_kr",
+ "nws_ln1_an_resolve_1g_kx",
+ "nws_ln2_an_resolve_50g_cr2",
+ "nws_ln2_an_resolve_50g_kr2",
+ "nws_ln2_an_resolve_40g_cr4",
+ "nws_ln2_an_resolve_40g_kr4",
+ "nws_ln2_an_resolve_25g_gr",
+ "nws_ln2_an_resolve_25g_cr",
+ "nws_ln2_an_resolve_25g_kr",
+ "nws_ln2_an_resolve_10g_kr",
+ "nws_ln2_an_resolve_1g_kx",
+ "nws_ln3_an_resolve_50g_cr2",
+ "nws_ln3_an_resolve_50g_kr2",
+ "nws_ln3_an_resolve_40g_cr4",
+ "nws_ln3_an_resolve_40g_kr4",
+ "nws_ln3_an_resolve_25g_gr",
+ "nws_ln3_an_resolve_25g_cr",
+ "nws_ln3_an_resolve_25g_kr",
+ "nws_ln3_an_resolve_10g_kr",
+ "nws_ln3_an_resolve_1g_kx",
+};
+#else
+#define nws_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nws_int0_k2_attn_idx[10] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg nws_int0_k2 = {
+ 0, 10, nws_int0_k2_attn_idx, 0x700180, 0x70018c, 0x700188, 0x700184
+};
+
+static const u16 nws_int1_k2_attn_idx[9] = {
+ 11, 12, 13, 14, 15, 16, 17, 18, 19,
+};
+
+static struct attn_hw_reg nws_int1_k2 = {
+ 1, 9, nws_int1_k2_attn_idx, 0x700190, 0x70019c, 0x700198, 0x700194
+};
+
+static const u16 nws_int2_k2_attn_idx[9] = {
+ 20, 21, 22, 23, 24, 25, 26, 27, 28,
+};
+
+static struct attn_hw_reg nws_int2_k2 = {
+ 2, 9, nws_int2_k2_attn_idx, 0x7001a0, 0x7001ac, 0x7001a8, 0x7001a4
+};
+
+static const u16 nws_int3_k2_attn_idx[9] = {
+ 29, 30, 31, 32, 33, 34, 35, 36, 37,
+};
+
+static struct attn_hw_reg nws_int3_k2 = {
+ 3, 9, nws_int3_k2_attn_idx, 0x7001b0, 0x7001bc, 0x7001b8, 0x7001b4
+};
+
+static struct attn_hw_reg *nws_int_k2_regs[4] = {
+ &nws_int0_k2, &nws_int1_k2, &nws_int2_k2, &nws_int3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nws_prty_attn_desc[4] = {
+ "nws_mem003_i_mem_prty",
+ "nws_mem001_i_mem_prty",
+ "nws_mem004_i_mem_prty",
+ "nws_mem002_i_mem_prty",
+};
+#else
+#define nws_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nws_prty1_k2_attn_idx[4] = {
+ 0, 1, 2, 3,
+};
+
+static struct attn_hw_reg nws_prty1_k2 = {
+ 0, 4, nws_prty1_k2_attn_idx, 0x700200, 0x70020c, 0x700208, 0x700204
+};
+
+static struct attn_hw_reg *nws_prty_k2_regs[1] = {
+ &nws_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ms_int_attn_desc[1] = {
+ "ms_address_error",
+};
+#else
+#define ms_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ms_int0_k2_attn_idx[1] = {
+ 0,
+};
+
+static struct attn_hw_reg ms_int0_k2 = {
+ 0, 1, ms_int0_k2_attn_idx, 0x6a0180, 0x6a018c, 0x6a0188, 0x6a0184
+};
+
+static struct attn_hw_reg *ms_int_k2_regs[1] = {
+ &ms_int0_k2,
+};
+
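+/* Layout note (inferred from the initializers below): each attn_hw_block
+ * entry pairs a block name with its interrupt and parity description arrays,
+ * followed by three per-chip register sets in the order {bb_a0, bb_b0, k2};
+ * each per-chip set holds the number of interrupt registers, the number of
+ * parity registers, and pointers to the matching attn_hw_reg arrays
+ * (OSAL_NULL where that chip variant lacks the block).
+ */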
+static struct attn_hw_block attn_blocks[] = {
+ {"grc", grc_int_attn_desc, grc_prty_attn_desc, {
+ {1, 1,
+ grc_int_bb_a0_regs,
+ grc_prty_bb_a0_regs},
+ {1, 1,
+ grc_int_bb_b0_regs,
+ grc_prty_bb_b0_regs},
+ {1, 1, grc_int_k2_regs,
+ grc_prty_k2_regs} } },
+ {"miscs", miscs_int_attn_desc, miscs_prty_attn_desc, {
+ {2, 0,
+
+ miscs_int_bb_a0_regs,
+ OSAL_NULL},
+ {2, 1,
+
+ miscs_int_bb_b0_regs,
+
+ miscs_prty_bb_b0_regs},
+ {1, 1,
+
+ miscs_int_k2_regs,
+
+ miscs_prty_k2_regs } } },
+ {"misc", misc_int_attn_desc, OSAL_NULL, {
+ {1, 0, misc_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, misc_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, misc_int_k2_regs,
+ OSAL_NULL } } },
+ {"dbu", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"pglue_b", pglue_b_int_attn_desc, pglue_b_prty_attn_desc, {
+ {1, 1,
+
+ pglue_b_int_bb_a0_regs,
+
+ pglue_b_prty_bb_a0_regs},
+ {1, 2,
+
+ pglue_b_int_bb_b0_regs,
+
+ pglue_b_prty_bb_b0_regs},
+ {1, 3,
+
+ pglue_b_int_k2_regs,
+
+ pglue_b_prty_k2_regs } } },
+ {"cnig", cnig_int_attn_desc, cnig_prty_attn_desc, {
+ {1, 0,
+ cnig_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ cnig_int_bb_b0_regs,
+
+ cnig_prty_bb_b0_regs},
+ {1, 1,
+ cnig_int_k2_regs,
+
+ cnig_prty_k2_regs } } },
+ {"cpmu", cpmu_int_attn_desc, OSAL_NULL, {
+ {1, 0, cpmu_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, cpmu_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, cpmu_int_k2_regs,
+ OSAL_NULL } } },
+ {"ncsi", ncsi_int_attn_desc, ncsi_prty_attn_desc, {
+ {1, 1,
+ ncsi_int_bb_a0_regs,
+
+ ncsi_prty_bb_a0_regs},
+ {1, 1,
+ ncsi_int_bb_b0_regs,
+
+ ncsi_prty_bb_b0_regs},
+ {1, 1,
+ ncsi_int_k2_regs,
+
+ ncsi_prty_k2_regs } } },
+ {"opte", OSAL_NULL, opte_prty_attn_desc, {
+ {0, 1, OSAL_NULL,
+ opte_prty_bb_a0_regs},
+ {0, 2, OSAL_NULL,
+ opte_prty_bb_b0_regs},
+ {0, 2, OSAL_NULL,
+ opte_prty_k2_regs } } },
+ {"bmb", bmb_int_attn_desc, bmb_prty_attn_desc, {
+ {12, 2,
+ bmb_int_bb_a0_regs,
+ bmb_prty_bb_a0_regs},
+ {12, 3,
+ bmb_int_bb_b0_regs,
+ bmb_prty_bb_b0_regs},
+ {12, 3, bmb_int_k2_regs,
+ bmb_prty_k2_regs } } },
+ {"pcie", pcie_int_attn_desc, pcie_prty_attn_desc, {
+ {0, 1, OSAL_NULL,
+
+ pcie_prty_bb_a0_regs},
+ {0, 1, OSAL_NULL,
+
+ pcie_prty_bb_b0_regs},
+ {1, 2,
+ pcie_int_k2_regs,
+
+ pcie_prty_k2_regs } } },
+ {"mcp", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"mcp2", OSAL_NULL, mcp2_prty_attn_desc, {
+ {0, 2, OSAL_NULL,
+ mcp2_prty_bb_a0_regs},
+ {0, 2, OSAL_NULL,
+ mcp2_prty_bb_b0_regs},
+ {0, 2, OSAL_NULL,
+ mcp2_prty_k2_regs } } },
+ {"pswhst", pswhst_int_attn_desc, pswhst_prty_attn_desc, {
+ {1, 1,
+
+ pswhst_int_bb_a0_regs,
+
+ pswhst_prty_bb_a0_regs},
+ {1, 2,
+
+ pswhst_int_bb_b0_regs,
+
+ pswhst_prty_bb_b0_regs},
+ {1, 2,
+
+ pswhst_int_k2_regs,
+
+ pswhst_prty_k2_regs } } },
+ {"pswhst2", pswhst2_int_attn_desc, pswhst2_prty_attn_desc, {
+ {1, 0,
+
+ pswhst2_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswhst2_int_bb_b0_regs,
+
+ pswhst2_prty_bb_b0_regs},
+ {1, 1,
+
+ pswhst2_int_k2_regs,
+
+ pswhst2_prty_k2_regs } } },
+ {"pswrd", pswrd_int_attn_desc, pswrd_prty_attn_desc, {
+ {1, 0,
+
+ pswrd_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswrd_int_bb_b0_regs,
+
+ pswrd_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrd_int_k2_regs,
+
+ pswrd_prty_k2_regs } } },
+ {"pswrd2", pswrd2_int_attn_desc, pswrd2_prty_attn_desc, {
+ {1, 2,
+
+ pswrd2_int_bb_a0_regs,
+
+ pswrd2_prty_bb_a0_regs},
+ {1, 3,
+
+ pswrd2_int_bb_b0_regs,
+
+ pswrd2_prty_bb_b0_regs},
+ {1, 3,
+
+ pswrd2_int_k2_regs,
+
+ pswrd2_prty_k2_regs } } },
+ {"pswwr", pswwr_int_attn_desc, pswwr_prty_attn_desc, {
+ {1, 0,
+
+ pswwr_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswwr_int_bb_b0_regs,
+
+ pswwr_prty_bb_b0_regs},
+ {1, 1,
+
+ pswwr_int_k2_regs,
+
+ pswwr_prty_k2_regs } } },
+ {"pswwr2", pswwr2_int_attn_desc, pswwr2_prty_attn_desc, {
+ {1, 4,
+
+ pswwr2_int_bb_a0_regs,
+
+ pswwr2_prty_bb_a0_regs},
+ {1, 5,
+
+ pswwr2_int_bb_b0_regs,
+
+ pswwr2_prty_bb_b0_regs},
+ {1, 5,
+
+ pswwr2_int_k2_regs,
+
+ pswwr2_prty_k2_regs } } },
+ {"pswrq", pswrq_int_attn_desc, pswrq_prty_attn_desc, {
+ {1, 0,
+
+ pswrq_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pswrq_int_bb_b0_regs,
+
+ pswrq_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrq_int_k2_regs,
+
+ pswrq_prty_k2_regs } } },
+ {"pswrq2", pswrq2_int_attn_desc, pswrq2_prty_attn_desc, {
+ {1, 1,
+
+ pswrq2_int_bb_a0_regs,
+
+ pswrq2_prty_bb_a0_regs},
+ {1, 1,
+
+ pswrq2_int_bb_b0_regs,
+
+ pswrq2_prty_bb_b0_regs},
+ {1, 1,
+
+ pswrq2_int_k2_regs,
+
+ pswrq2_prty_k2_regs } } },
+ {"pglcs", pglcs_int_attn_desc, OSAL_NULL, {
+ {1, 0, pglcs_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, pglcs_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, pglcs_int_k2_regs,
+ OSAL_NULL } } },
+ {"dmae", dmae_int_attn_desc, dmae_prty_attn_desc, {
+ {1, 1,
+ dmae_int_bb_a0_regs,
+
+ dmae_prty_bb_a0_regs},
+ {1, 1,
+ dmae_int_bb_b0_regs,
+
+ dmae_prty_bb_b0_regs},
+ {1, 1,
+ dmae_int_k2_regs,
+
+ dmae_prty_k2_regs } } },
+ {"ptu", ptu_int_attn_desc, ptu_prty_attn_desc, {
+ {1, 1,
+ ptu_int_bb_a0_regs,
+ ptu_prty_bb_a0_regs},
+ {1, 1,
+ ptu_int_bb_b0_regs,
+ ptu_prty_bb_b0_regs},
+ {1, 1, ptu_int_k2_regs,
+ ptu_prty_k2_regs } } },
+ {"tcm", tcm_int_attn_desc, tcm_prty_attn_desc, {
+ {3, 2,
+ tcm_int_bb_a0_regs,
+ tcm_prty_bb_a0_regs},
+ {3, 2,
+ tcm_int_bb_b0_regs,
+ tcm_prty_bb_b0_regs},
+ {3, 2, tcm_int_k2_regs,
+ tcm_prty_k2_regs } } },
+ {"mcm", mcm_int_attn_desc, mcm_prty_attn_desc, {
+ {3, 2,
+ mcm_int_bb_a0_regs,
+ mcm_prty_bb_a0_regs},
+ {3, 2,
+ mcm_int_bb_b0_regs,
+ mcm_prty_bb_b0_regs},
+ {3, 2, mcm_int_k2_regs,
+ mcm_prty_k2_regs } } },
+ {"ucm", ucm_int_attn_desc, ucm_prty_attn_desc, {
+ {3, 2,
+ ucm_int_bb_a0_regs,
+ ucm_prty_bb_a0_regs},
+ {3, 2,
+ ucm_int_bb_b0_regs,
+ ucm_prty_bb_b0_regs},
+ {3, 2, ucm_int_k2_regs,
+ ucm_prty_k2_regs } } },
+ {"xcm", xcm_int_attn_desc, xcm_prty_attn_desc, {
+ {3, 2,
+ xcm_int_bb_a0_regs,
+ xcm_prty_bb_a0_regs},
+ {3, 2,
+ xcm_int_bb_b0_regs,
+ xcm_prty_bb_b0_regs},
+ {3, 2, xcm_int_k2_regs,
+ xcm_prty_k2_regs } } },
+ {"ycm", ycm_int_attn_desc, ycm_prty_attn_desc, {
+ {3, 2,
+ ycm_int_bb_a0_regs,
+ ycm_prty_bb_a0_regs},
+ {3, 2,
+ ycm_int_bb_b0_regs,
+ ycm_prty_bb_b0_regs},
+ {3, 2, ycm_int_k2_regs,
+ ycm_prty_k2_regs } } },
+ {"pcm", pcm_int_attn_desc, pcm_prty_attn_desc, {
+ {3, 1,
+ pcm_int_bb_a0_regs,
+ pcm_prty_bb_a0_regs},
+ {3, 1,
+ pcm_int_bb_b0_regs,
+ pcm_prty_bb_b0_regs},
+ {3, 1, pcm_int_k2_regs,
+ pcm_prty_k2_regs } } },
+ {"qm", qm_int_attn_desc, qm_prty_attn_desc, {
+ {1, 4, qm_int_bb_a0_regs,
+ qm_prty_bb_a0_regs},
+ {1, 4, qm_int_bb_b0_regs,
+ qm_prty_bb_b0_regs},
+ {1, 4, qm_int_k2_regs,
+ qm_prty_k2_regs } } },
+ {"tm", tm_int_attn_desc, tm_prty_attn_desc, {
+ {2, 1, tm_int_bb_a0_regs,
+ tm_prty_bb_a0_regs},
+ {2, 1, tm_int_bb_b0_regs,
+ tm_prty_bb_b0_regs},
+ {2, 1, tm_int_k2_regs,
+ tm_prty_k2_regs } } },
+ {"dorq", dorq_int_attn_desc, dorq_prty_attn_desc, {
+ {1, 1,
+ dorq_int_bb_a0_regs,
+
+ dorq_prty_bb_a0_regs},
+ {1, 2,
+ dorq_int_bb_b0_regs,
+
+ dorq_prty_bb_b0_regs},
+ {1, 2,
+ dorq_int_k2_regs,
+
+ dorq_prty_k2_regs } } },
+ {"brb", brb_int_attn_desc, brb_prty_attn_desc, {
+ {12, 2,
+ brb_int_bb_a0_regs,
+ brb_prty_bb_a0_regs},
+ {12, 3,
+ brb_int_bb_b0_regs,
+ brb_prty_bb_b0_regs},
+ {12, 3, brb_int_k2_regs,
+ brb_prty_k2_regs } } },
+ {"src", src_int_attn_desc, OSAL_NULL, {
+ {1, 0, src_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 0, src_int_bb_b0_regs,
+ OSAL_NULL},
+ {1, 0, src_int_k2_regs,
+ OSAL_NULL } } },
+ {"prs", prs_int_attn_desc, prs_prty_attn_desc, {
+ {1, 3,
+ prs_int_bb_a0_regs,
+ prs_prty_bb_a0_regs},
+ {1, 3,
+ prs_int_bb_b0_regs,
+ prs_prty_bb_b0_regs},
+ {1, 3, prs_int_k2_regs,
+ prs_prty_k2_regs } } },
+ {"tsdm", tsdm_int_attn_desc, tsdm_prty_attn_desc, {
+ {1, 1,
+ tsdm_int_bb_a0_regs,
+
+ tsdm_prty_bb_a0_regs},
+ {1, 1,
+ tsdm_int_bb_b0_regs,
+
+ tsdm_prty_bb_b0_regs},
+ {1, 1,
+ tsdm_int_k2_regs,
+
+ tsdm_prty_k2_regs } } },
+ {"msdm", msdm_int_attn_desc, msdm_prty_attn_desc, {
+ {1, 1,
+ msdm_int_bb_a0_regs,
+
+ msdm_prty_bb_a0_regs},
+ {1, 1,
+ msdm_int_bb_b0_regs,
+
+ msdm_prty_bb_b0_regs},
+ {1, 1,
+ msdm_int_k2_regs,
+
+ msdm_prty_k2_regs } } },
+ {"usdm", usdm_int_attn_desc, usdm_prty_attn_desc, {
+ {1, 1,
+ usdm_int_bb_a0_regs,
+
+ usdm_prty_bb_a0_regs},
+ {1, 1,
+ usdm_int_bb_b0_regs,
+
+ usdm_prty_bb_b0_regs},
+ {1, 1,
+ usdm_int_k2_regs,
+
+ usdm_prty_k2_regs } } },
+ {"xsdm", xsdm_int_attn_desc, xsdm_prty_attn_desc, {
+ {1, 1,
+ xsdm_int_bb_a0_regs,
+
+ xsdm_prty_bb_a0_regs},
+ {1, 1,
+ xsdm_int_bb_b0_regs,
+
+ xsdm_prty_bb_b0_regs},
+ {1, 1,
+ xsdm_int_k2_regs,
+
+ xsdm_prty_k2_regs } } },
+ {"ysdm", ysdm_int_attn_desc, ysdm_prty_attn_desc, {
+ {1, 1,
+ ysdm_int_bb_a0_regs,
+
+ ysdm_prty_bb_a0_regs},
+ {1, 1,
+ ysdm_int_bb_b0_regs,
+
+ ysdm_prty_bb_b0_regs},
+ {1, 1,
+ ysdm_int_k2_regs,
+
+ ysdm_prty_k2_regs } } },
+ {"psdm", psdm_int_attn_desc, psdm_prty_attn_desc, {
+ {1, 1,
+ psdm_int_bb_a0_regs,
+
+ psdm_prty_bb_a0_regs},
+ {1, 1,
+ psdm_int_bb_b0_regs,
+
+ psdm_prty_bb_b0_regs},
+ {1, 1,
+ psdm_int_k2_regs,
+
+ psdm_prty_k2_regs } } },
+ {"tsem", tsem_int_attn_desc, tsem_prty_attn_desc, {
+ {3, 3,
+ tsem_int_bb_a0_regs,
+
+ tsem_prty_bb_a0_regs},
+ {3, 3,
+ tsem_int_bb_b0_regs,
+
+ tsem_prty_bb_b0_regs},
+ {3, 4,
+ tsem_int_k2_regs,
+
+ tsem_prty_k2_regs } } },
+ {"msem", msem_int_attn_desc, msem_prty_attn_desc, {
+ {3, 2,
+ msem_int_bb_a0_regs,
+
+ msem_prty_bb_a0_regs},
+ {3, 2,
+ msem_int_bb_b0_regs,
+
+ msem_prty_bb_b0_regs},
+ {3, 3,
+ msem_int_k2_regs,
+
+ msem_prty_k2_regs } } },
+ {"usem", usem_int_attn_desc, usem_prty_attn_desc, {
+ {3, 2,
+ usem_int_bb_a0_regs,
+
+ usem_prty_bb_a0_regs},
+ {3, 2,
+ usem_int_bb_b0_regs,
+
+ usem_prty_bb_b0_regs},
+ {3, 3,
+ usem_int_k2_regs,
+
+ usem_prty_k2_regs } } },
+ {"xsem", xsem_int_attn_desc, xsem_prty_attn_desc, {
+ {3, 2,
+ xsem_int_bb_a0_regs,
+
+ xsem_prty_bb_a0_regs},
+ {3, 2,
+ xsem_int_bb_b0_regs,
+
+ xsem_prty_bb_b0_regs},
+ {3, 3,
+ xsem_int_k2_regs,
+
+ xsem_prty_k2_regs } } },
+ {"ysem", ysem_int_attn_desc, ysem_prty_attn_desc, {
+ {3, 2,
+ ysem_int_bb_a0_regs,
+
+ ysem_prty_bb_a0_regs},
+ {3, 2,
+ ysem_int_bb_b0_regs,
+
+ ysem_prty_bb_b0_regs},
+ {3, 3,
+ ysem_int_k2_regs,
+
+ ysem_prty_k2_regs } } },
+ {"psem", psem_int_attn_desc, psem_prty_attn_desc, {
+ {3, 3,
+ psem_int_bb_a0_regs,
+
+ psem_prty_bb_a0_regs},
+ {3, 3,
+ psem_int_bb_b0_regs,
+
+ psem_prty_bb_b0_regs},
+ {3, 4,
+ psem_int_k2_regs,
+
+ psem_prty_k2_regs } } },
+ {"rss", rss_int_attn_desc, rss_prty_attn_desc, {
+ {1, 1,
+ rss_int_bb_a0_regs,
+ rss_prty_bb_a0_regs},
+ {1, 1,
+ rss_int_bb_b0_regs,
+ rss_prty_bb_b0_regs},
+ {1, 1, rss_int_k2_regs,
+ rss_prty_k2_regs } } },
+ {"tmld", tmld_int_attn_desc, tmld_prty_attn_desc, {
+ {1, 1,
+ tmld_int_bb_a0_regs,
+
+ tmld_prty_bb_a0_regs},
+ {1, 1,
+ tmld_int_bb_b0_regs,
+
+ tmld_prty_bb_b0_regs},
+ {1, 1,
+ tmld_int_k2_regs,
+
+ tmld_prty_k2_regs } } },
+ {"muld", muld_int_attn_desc, muld_prty_attn_desc, {
+ {1, 1,
+ muld_int_bb_a0_regs,
+
+ muld_prty_bb_a0_regs},
+ {1, 1,
+ muld_int_bb_b0_regs,
+
+ muld_prty_bb_b0_regs},
+ {1, 1,
+ muld_int_k2_regs,
+
+ muld_prty_k2_regs } } },
+ {"yuld", yuld_int_attn_desc, yuld_prty_attn_desc, {
+ {1, 1,
+ yuld_int_bb_a0_regs,
+
+ yuld_prty_bb_a0_regs},
+ {1, 1,
+ yuld_int_bb_b0_regs,
+
+ yuld_prty_bb_b0_regs},
+ {1, 1,
+ yuld_int_k2_regs,
+
+ yuld_prty_k2_regs } } },
+ {"xyld", xyld_int_attn_desc, xyld_prty_attn_desc, {
+ {1, 1,
+ xyld_int_bb_a0_regs,
+
+ xyld_prty_bb_a0_regs},
+ {1, 1,
+ xyld_int_bb_b0_regs,
+
+ xyld_prty_bb_b0_regs},
+ {1, 1,
+ xyld_int_k2_regs,
+
+ xyld_prty_k2_regs } } },
+ {"prm", prm_int_attn_desc, prm_prty_attn_desc, {
+ {1, 1,
+ prm_int_bb_a0_regs,
+ prm_prty_bb_a0_regs},
+ {1, 2,
+ prm_int_bb_b0_regs,
+ prm_prty_bb_b0_regs},
+ {1, 2, prm_int_k2_regs,
+ prm_prty_k2_regs } } },
+ {"pbf_pb1", pbf_pb1_int_attn_desc, pbf_pb1_prty_attn_desc, {
+ {1, 0,
+
+ pbf_pb1_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pbf_pb1_int_bb_b0_regs,
+
+ pbf_pb1_prty_bb_b0_regs},
+ {1, 1,
+
+ pbf_pb1_int_k2_regs,
+
+ pbf_pb1_prty_k2_regs } } },
+ {"pbf_pb2", pbf_pb2_int_attn_desc, pbf_pb2_prty_attn_desc, {
+ {1, 0,
+
+ pbf_pb2_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+
+ pbf_pb2_int_bb_b0_regs,
+
+ pbf_pb2_prty_bb_b0_regs},
+ {1, 1,
+
+ pbf_pb2_int_k2_regs,
+
+ pbf_pb2_prty_k2_regs } } },
+ {"rpb", rpb_int_attn_desc, rpb_prty_attn_desc, {
+ {1, 0,
+ rpb_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ rpb_int_bb_b0_regs,
+ rpb_prty_bb_b0_regs},
+ {1, 1, rpb_int_k2_regs,
+ rpb_prty_k2_regs } } },
+ {"btb", btb_int_attn_desc, btb_prty_attn_desc, {
+ {11, 1,
+ btb_int_bb_a0_regs,
+ btb_prty_bb_a0_regs},
+ {11, 2,
+ btb_int_bb_b0_regs,
+ btb_prty_bb_b0_regs},
+ {11, 2, btb_int_k2_regs,
+ btb_prty_k2_regs } } },
+ {"pbf", pbf_int_attn_desc, pbf_prty_attn_desc, {
+ {1, 2,
+ pbf_int_bb_a0_regs,
+ pbf_prty_bb_a0_regs},
+ {1, 3,
+ pbf_int_bb_b0_regs,
+ pbf_prty_bb_b0_regs},
+ {1, 3, pbf_int_k2_regs,
+ pbf_prty_k2_regs } } },
+ {"rdif", rdif_int_attn_desc, rdif_prty_attn_desc, {
+ {1, 0,
+ rdif_int_bb_a0_regs,
+ OSAL_NULL},
+ {1, 1,
+ rdif_int_bb_b0_regs,
+
+ rdif_prty_bb_b0_regs},
+ {1, 1,
+ rdif_int_k2_regs,
+
+ rdif_prty_k2_regs } } },
+ {"tdif", tdif_int_attn_desc, tdif_prty_attn_desc, {
+ {1, 1,
+ tdif_int_bb_a0_regs,
+
+ tdif_prty_bb_a0_regs},
+ {1, 2,
+ tdif_int_bb_b0_regs,
+
+ tdif_prty_bb_b0_regs},
+ {1, 2,
+ tdif_int_k2_regs,
+
+ tdif_prty_k2_regs } } },
+ {"cdu", cdu_int_attn_desc, cdu_prty_attn_desc, {
+ {1, 1,
+ cdu_int_bb_a0_regs,
+ cdu_prty_bb_a0_regs},
+ {1, 1,
+ cdu_int_bb_b0_regs,
+ cdu_prty_bb_b0_regs},
+ {1, 1, cdu_int_k2_regs,
+ cdu_prty_k2_regs } } },
+ {"ccfc", ccfc_int_attn_desc, ccfc_prty_attn_desc, {
+ {1, 2,
+ ccfc_int_bb_a0_regs,
+
+ ccfc_prty_bb_a0_regs},
+ {1, 2,
+ ccfc_int_bb_b0_regs,
+
+ ccfc_prty_bb_b0_regs},
+ {1, 2,
+ ccfc_int_k2_regs,
+
+ ccfc_prty_k2_regs } } },
+ {"tcfc", tcfc_int_attn_desc, tcfc_prty_attn_desc, {
+ {1, 2,
+ tcfc_int_bb_a0_regs,
+
+ tcfc_prty_bb_a0_regs},
+ {1, 2,
+ tcfc_int_bb_b0_regs,
+
+ tcfc_prty_bb_b0_regs},
+ {1, 2,
+ tcfc_int_k2_regs,
+
+ tcfc_prty_k2_regs } } },
+ {"igu", igu_int_attn_desc, igu_prty_attn_desc, {
+ {1, 3,
+ igu_int_bb_a0_regs,
+ igu_prty_bb_a0_regs},
+ {1, 3,
+ igu_int_bb_b0_regs,
+ igu_prty_bb_b0_regs},
+ {1, 2, igu_int_k2_regs,
+ igu_prty_k2_regs } } },
+ {"cau", cau_int_attn_desc, cau_prty_attn_desc, {
+ {1, 1,
+ cau_int_bb_a0_regs,
+ cau_prty_bb_a0_regs},
+ {1, 1,
+ cau_int_bb_b0_regs,
+ cau_prty_bb_b0_regs},
+ {1, 1, cau_int_k2_regs,
+ cau_prty_k2_regs } } },
+ {"umac", umac_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, umac_int_k2_regs,
+ OSAL_NULL } } },
+ {"xmac", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"dbg", dbg_int_attn_desc, dbg_prty_attn_desc, {
+ {1, 1,
+ dbg_int_bb_a0_regs,
+ dbg_prty_bb_a0_regs},
+ {1, 1,
+ dbg_int_bb_b0_regs,
+ dbg_prty_bb_b0_regs},
+ {1, 1, dbg_int_k2_regs,
+ dbg_prty_k2_regs } } },
+ {"nig", nig_int_attn_desc, nig_prty_attn_desc, {
+ {6, 4,
+ nig_int_bb_a0_regs,
+ nig_prty_bb_a0_regs},
+ {6, 5,
+ nig_int_bb_b0_regs,
+ nig_prty_bb_b0_regs},
+ {10, 5, nig_int_k2_regs,
+ nig_prty_k2_regs } } },
+ {"wol", wol_int_attn_desc, wol_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {1, 1, wol_int_k2_regs,
+ wol_prty_k2_regs } } },
+ {"bmbn", bmbn_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, bmbn_int_k2_regs,
+ OSAL_NULL } } },
+ {"ipc", ipc_int_attn_desc, ipc_prty_attn_desc, {
+ {1, 1,
+ ipc_int_bb_a0_regs,
+ ipc_prty_bb_a0_regs},
+ {1, 1,
+ ipc_int_bb_b0_regs,
+ ipc_prty_bb_b0_regs},
+ {1, 0, ipc_int_k2_regs,
+ OSAL_NULL } } },
+ {"nwm", nwm_int_attn_desc, nwm_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {1, 3, nwm_int_k2_regs,
+ nwm_prty_k2_regs } } },
+ {"nws", nws_int_attn_desc, nws_prty_attn_desc, {
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {0, 0, OSAL_NULL,
+ OSAL_NULL},
+ {4, 1, nws_int_k2_regs,
+ nws_prty_k2_regs } } },
+ {"ms", ms_int_attn_desc, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {1, 0, ms_int_k2_regs,
+ OSAL_NULL } } },
+ {"phy_pcie", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"misc_aeu", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+ {"bar0_map", OSAL_NULL, OSAL_NULL, {
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL},
+ {0, 0, OSAL_NULL, OSAL_NULL } } },
+};
+
+#define NUM_INT_REGS 423
+#define NUM_PRTY_REGS 378
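+
+/* Illustrative sketch, kept inside a comment so it cannot affect the build:
+ * one way a debug helper might walk these tables. The field names used here
+ * (name, chip_regs, num_of_int_regs, int_regs, reg_idx, num_of_bits,
+ * sts_addr) and the OSAL_PRINTF helper are assumptions inferred from the
+ * initializers above, not a confirmed API.
+ *
+ * static void dump_int_regs(int chip_id)
+ * {
+ *	unsigned int i, j;
+ *
+ *	for (i = 0; i < sizeof(attn_blocks) / sizeof(attn_blocks[0]); i++) {
+ *		struct attn_hw_block *blk = &attn_blocks[i];
+ *
+ *		for (j = 0; j < blk->chip_regs[chip_id].num_of_int_regs; j++) {
+ *			struct attn_hw_reg *reg =
+ *			    blk->chip_regs[chip_id].int_regs[j];
+ *
+ *			OSAL_PRINTF("%s int%u: %u bits, sts 0x%x\n",
+ *				    blk->name, reg->reg_idx,
+ *				    reg->num_of_bits, reg->sts_addr);
+ *		}
+ *	}
+ * }
+ */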
+
+#endif /* __PREVENT_INT_ATTN__ */
+
+#endif /* __ATTN_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
new file mode 100644
index 00000000..c5734490
--- /dev/null
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -0,0 +1,724 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_CHAIN_H__
+#define __ECORE_CHAIN_H__
+
+#include <assert.h> /* @DPDK */
+
+#include "common_hsi.h"
+#include "ecore_utils.h"
+
+enum ecore_chain_mode {
+ /* Each Page contains a next pointer at its end */
+ ECORE_CHAIN_MODE_NEXT_PTR,
+
+ /* Chain is a single page; a next pointer is not required */
+ ECORE_CHAIN_MODE_SINGLE,
+
+ /* Page pointers are located in a side list */
+ ECORE_CHAIN_MODE_PBL,
+};
+
+enum ecore_chain_use_mode {
+ ECORE_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
+ ECORE_CHAIN_USE_TO_CONSUME, /* Chain starts full */
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+};
+
+enum ecore_chain_cnt_type {
+ /* The chain's size/prod/cons are kept in 16-bit variables */
+ ECORE_CHAIN_CNT_TYPE_U16,
+
+ /* The chain's size/prod/cons are kept in 32-bit variables */
+ ECORE_CHAIN_CNT_TYPE_U32,
+};
+
+struct ecore_chain_next {
+ struct regpair next_phys;
+ void *next_virt;
+};
+
+struct ecore_chain_pbl_u16 {
+ u16 prod_page_idx;
+ u16 cons_page_idx;
+};
+
+struct ecore_chain_pbl_u32 {
+ u32 prod_page_idx;
+ u32 cons_page_idx;
+};
+
+struct ecore_chain_pbl {
+ /* Base address of a pre-allocated buffer for pbl */
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+
+ /* Table for keeping the virtual addresses of the chain pages,
+ * corresponding to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
+
+ /* Index of the page currently used by the producer/consumer */
+ union {
+ struct ecore_chain_pbl_u16 pbl16;
+ struct ecore_chain_pbl_u32 pbl32;
+ } u;
+};
+
+struct ecore_chain_u16 {
+ /* Cyclic index of next element to produce/consume */
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct ecore_chain_u32 {
+ /* Cyclic index of next element to produce/consume */
+ u32 prod_idx;
+ u32 cons_idx;
+};
+
+struct ecore_chain {
+ /* Address of first page of the chain */
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+
+ /* Point to next element to produce/consume */
+ void *p_prod_elem;
+ void *p_cons_elem;
+
+ enum ecore_chain_mode mode;
+ enum ecore_chain_use_mode intended_use;
+
+ enum ecore_chain_cnt_type cnt_type;
+ union {
+ struct ecore_chain_u16 chain16;
+ struct ecore_chain_u32 chain32;
+ } u;
+
+ u32 page_cnt;
+
+ /* Number of elements - capacity counts usable elements only,
+ * while size is the total number of elements in the entire chain.
+ */
+ u32 capacity;
+ u32 size;
+
+ /* Elements information for fast calculations */
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_unusable;
+ u16 usable_per_page;
+ u16 elem_size;
+ u16 next_page_mask;
+
+ struct ecore_chain_pbl pbl;
+};
+
+#define ECORE_CHAIN_PBL_ENTRY_SIZE (8)
+#define ECORE_CHAIN_PAGE_SIZE (0x1000)
+#define ELEMS_PER_PAGE(elem_size) (ECORE_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \
+ (1 + ((sizeof(struct ecore_chain_next) - 1) / \
+ (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((u32)(ELEMS_PER_PAGE(elem_size) - \
+ UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
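+
+/* Worked example (assuming 8-byte elements and a 64-bit platform, where
+ * sizeof(struct ecore_chain_next) is 16): ELEMS_PER_PAGE(8) = 0x1000 / 8 =
+ * 512, UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR) = 1 + (16 - 1) / 8 = 2, and
+ * USABLE_ELEMS_PER_PAGE(8, NEXT_PTR) = 510; a 1024-element chain thus needs
+ * ECORE_CHAIN_PAGE_CNT(1024, 8, NEXT_PTR) = DIV_ROUND_UP(1024, 510) = 3
+ * pages.
+ */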
+
+#define is_chain_u16(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
+
+/* Accessors */
+static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u16(p_chain));
+ return p_chain->u.chain16.prod_idx;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u32(p_chain));
+ return p_chain->u.chain32.prod_idx;
+}
+
+static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u16(p_chain));
+ return p_chain->u.chain16.cons_idx;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
+{
+ OSAL_ASSERT(is_chain_u32(p_chain));
+ return p_chain->u.chain32.cons_idx;
+}
+
+/* FIXME:
+ * Should create OSALs for the below definitions.
+ * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
+ * kernel versions that lack them.
+ */
+#define ECORE_U16_MAX ((u16)~0U)
+#define ECORE_U32_MAX ((u32)~0U)
+
+static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
+{
+ u16 used;
+
+ OSAL_ASSERT(is_chain_u16(p_chain));
+
+ used = (u16)(((u32)ECORE_U16_MAX + 1 +
+ (u32)(p_chain->u.chain16.prod_idx)) -
+ (u32)p_chain->u.chain16.cons_idx);
+ if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+
+ return (u16)(p_chain->capacity - used);
+}
+
+static OSAL_INLINE u32
+ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
+{
+ u32 used;
+
+ OSAL_ASSERT(is_chain_u32(p_chain));
+
+ used = (u32)(((u64)ECORE_U32_MAX + 1 +
+ (u64)(p_chain->u.chain32.prod_idx)) -
+ (u64)p_chain->u.chain32.cons_idx);
+ if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+ used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+ p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+
+ return p_chain->capacity - used;
+}
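+
+/* Note on the arithmetic above: adding (MAX + 1) in a wider type computes
+ * (prod_idx - cons_idx) modulo 2^16 (resp. 2^32) without relying on
+ * wrap-around of the narrower type; e.g. prod_idx = 2 and cons_idx = 0xfffe
+ * give used = (0x10000 + 2) - 0xfffe = 4. In NEXT_PTR mode the difference of
+ * the page numbers is then subtracted, discounting index positions reserved
+ * for the per-page next pointers between the two indices.
+ */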
+
+static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ return (ecore_chain_get_elem_left(p_chain) ==
+ p_chain->capacity);
+ else
+ return (ecore_chain_get_elem_left_u32(p_chain) ==
+ p_chain->capacity);
+}
+
+static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ return (ecore_chain_get_elem_left(p_chain) == 0);
+ else
+ return (ecore_chain_get_elem_left_u32(p_chain) == 0);
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->elem_per_page;
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->usable_per_page;
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
+{
+ return p_chain->elem_unusable;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
+{
+ return p_chain->size;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
+{
+ return p_chain->page_cnt;
+}
+
+static OSAL_INLINE
+dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
+{
+ return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief ecore_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static OSAL_INLINE void
+ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
+ void *idx_to_inc, void *page_to_inc)
+{
+ struct ecore_chain_next *p_next = OSAL_NULL;
+ u32 page_index = 0;
+
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ p_next = (struct ecore_chain_next *)(*p_next_elem);
+ *p_next_elem = p_next->next_virt;
+ if (is_chain_u16(p_chain))
+ *(u16 *)idx_to_inc += p_chain->elem_unusable;
+ else
+ *(u32 *)idx_to_inc += p_chain->elem_unusable;
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ *p_next_elem = p_chain->p_virt_addr;
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ if (is_chain_u16(p_chain)) {
+ if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
+ *(u16 *)page_to_inc = 0;
+ page_index = *(u16 *)page_to_inc;
+ } else {
+ if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
+ *(u32 *)page_to_inc = 0;
+ page_index = *(u32 *)page_to_inc;
+ }
+ *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+ }
+}
+
+#define is_unusable_idx(p, idx) \
+ (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_idx_u32(p, idx) \
+ (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+ ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
+ (p)->usable_per_page)
+
+#define is_unusable_next_idx_u32(p, idx) \
+ ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) \
+ == (p)->usable_per_page)
+
+#define test_and_skip(p, idx) \
+ do { \
+ if (is_chain_u16(p)) { \
+ if (is_unusable_idx(p, idx)) \
+ (p)->u.chain16.idx += (p)->elem_unusable; \
+ } else { \
+ if (is_unusable_idx_u32(p, idx)) \
+ (p)->u.chain32.idx += (p)->elem_unusable; \
+ } \
+ } while (0)
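+
+/* Example with illustrative numbers: for usable_per_page = 510 and
+ * elem_unusable = 2 (see the worked example above), an index whose offset
+ * within its page reaches 510 is sitting on the next-pointer slots, so
+ * test_and_skip() bumps it by 2, i.e. straight to offset 0 of the following
+ * page. In SINGLE/PBL modes elem_unusable is 0 and the macro is a no-op.
+ */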
+
+/**
+ * @brief ecore_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static OSAL_INLINE
+void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx += (u16)num;
+ else
+ p_chain->u.chain32.cons_idx += num;
+ test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief ecore_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate that previously produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx++;
+ else
+ p_chain->u.chain32.cons_idx++;
+ test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief ecore_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the driver's
+ * responsibility to validate that the chain has room for a new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next element
+ */
+static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
+{
+ void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain16.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain16.prod_idx++;
+ } else {
+ if ((p_chain->u.chain32.prod_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_prod_idx = &p_chain->u.chain32.prod_idx;
+ p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ p_prod_idx, p_prod_page_idx);
+ }
+ p_chain->u.chain32.prod_idx++;
+ }
+
+ p_ret = p_chain->p_prod_elem;
+ p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+ p_chain->elem_size);
+
+ return p_ret;
+}
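+
+/* Typical producer-side usage (a sketch inside a comment; the element type
+ * and error handling are illustrative only):
+ *
+ *	struct eth_tx_bd *p_bd;
+ *
+ *	if (ecore_chain_get_elem_left(p_chain) == 0)
+ *		return ECORE_INVAL;
+ *	p_bd = (struct eth_tx_bd *)ecore_chain_produce(p_chain);
+ *	OSAL_MEMSET(p_bd, 0, sizeof(*p_bd));
+ *	... fill the BD, then update the HW producer ...
+ */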
+
+/**
+ * @brief ecore_chain_get_capacity -
+ *
+ * Get the maximum number of usable elements (BDs) in the chain
+ *
+ * @param p_chain
+ *
+ * @return u32, the chain capacity
+ */
+static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
+{
+ return p_chain->capacity;
+}
+
+/**
+ * @brief ecore_chain_recycle_consumed -
+ *
+ * Recycles an element which was previously consumed;
+ * increments the producer index so the element can be handed back to FW.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE
+void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
+{
+ test_and_skip(p_chain, prod_idx);
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx++;
+ else
+ p_chain->u.chain32.prod_idx++;
+}
+
+/**
+ * @brief ecore_chain_consume -
+ *
+ * A chain in which the driver consumes data written by a different source
+ * (e.g., the FW) should use this to access the passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
+{
+ void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;
+
+ if (is_chain_u16(p_chain)) {
+ if ((p_chain->u.chain16.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain16.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain16.cons_idx++;
+ } else {
+ if ((p_chain->u.chain32.cons_idx &
+ p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+ p_cons_idx = &p_chain->u.chain32.cons_idx;
+ p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
+ ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ p_cons_idx, p_cons_page_idx);
+ }
+ p_chain->u.chain32.cons_idx++;
+ }
+
+ p_ret = p_chain->p_cons_elem;
+ p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+ p_chain->elem_size);
+
+ return p_ret;
+}
+
+/**
+ * @brief ecore_chain_reset -
+ *
+ * Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
+{
+ u32 i;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->u.chain16.prod_idx = 0;
+ p_chain->u.chain16.cons_idx = 0;
+ } else {
+ p_chain->u.chain32.prod_idx = 0;
+ p_chain->u.chain32.cons_idx = 0;
+ }
+ p_chain->p_cons_elem = p_chain->p_virt_addr;
+ p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+ * indices, to avoid unnecessary page advancing on the first
+ * call to ecore_chain_produce/consume. Instead, the indices
+ * will be advanced to page_cnt and then will be wrapped to 0.
+ */
+ u32 reset_val = p_chain->page_cnt - 1;
+
+ if (is_chain_u16(p_chain)) {
+ p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+ } else {
+ p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
+ p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+ }
+ }
+
+ switch (p_chain->intended_use) {
+ case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case ECORE_CHAIN_USE_TO_PRODUCE:
+ /* Do nothing */
+ break;
+
+ case ECORE_CHAIN_USE_TO_CONSUME:
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
+ ecore_chain_recycle_consumed(p_chain);
+ break;
+ }
+}
+
+/**
+ * @brief ecore_chain_init_params -
+ *
+ * Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param intended_use
+ * @param mode
+ * @param cnt_type
+ */
+static OSAL_INLINE void
+ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type)
+{
+ /* chain fixed parameters */
+ p_chain->p_virt_addr = OSAL_NULL;
+ p_chain->p_phys_addr = 0;
+ p_chain->elem_size = elem_size;
+ p_chain->intended_use = intended_use;
+ p_chain->mode = mode;
+ p_chain->cnt_type = cnt_type;
+
+ p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+ p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+ p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->next_page_mask = (p_chain->usable_per_page &
+ p_chain->elem_per_page_mask);
+
+ p_chain->page_cnt = page_cnt;
+ p_chain->capacity = p_chain->usable_per_page * page_cnt;
+ p_chain->size = p_chain->elem_per_page * page_cnt;
+
+ p_chain->pbl.p_phys_table = 0;
+ p_chain->pbl.p_virt_table = OSAL_NULL;
+ p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
+}
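+
+/* Typical bring-up sequence (a sketch inside a comment; the allocation step
+ * stands in for whatever DMA allocator the platform provides):
+ *
+ *	page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size,
+ *					ECORE_CHAIN_MODE_SINGLE);
+ *	ecore_chain_init_params(p_chain, page_cnt, elem_size,
+ *				ECORE_CHAIN_USE_TO_PRODUCE,
+ *				ECORE_CHAIN_MODE_SINGLE,
+ *				ECORE_CHAIN_CNT_TYPE_U16);
+ *	<allocate page_cnt * ECORE_CHAIN_PAGE_SIZE DMA bytes ->
+ *	 p_virt/p_phys>;
+ *	ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ *	ecore_chain_reset(p_chain);
+ */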
+
+/**
+ * @brief ecore_chain_init_mem -
+ *
+ * Initializes a basic chain struct with its chain buffers
+ *
+ * @param p_chain
+ * @param p_virt_addr virtual address of allocated buffer's beginning
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr)
+{
+ p_chain->p_virt_addr = p_virt_addr;
+ p_chain->p_phys_addr = p_phys_addr;
+}
+
+/**
+ * @brief ecore_chain_init_pbl_mem -
+ *
+ * Initializes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl pointer to a pre-allocated side table which will hold
+ * virtual page addresses.
+ * @param p_phys_pbl pointer to a pre-allocated side table which will hold
+ * physical page addresses.
+ * @param pp_virt_addr_tbl
+ * pointer to a pre-allocated side table which will hold
+ * the virtual addresses of the chain pages.
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
+ void *p_virt_pbl,
+ dma_addr_t p_phys_pbl,
+ void **pp_virt_addr_tbl)
+{
+ p_chain->pbl.p_phys_table = p_phys_pbl;
+ p_chain->pbl.p_virt_table = p_virt_pbl;
+ p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+}
+
+/**
+ * @brief ecore_chain_init_next_ptr_elem -
+ *
+ * Initializes a next pointer element
+ *
+ * @param p_chain
+ * @param p_virt_curr virtual address of the chain page whose next
+ * pointer element is being initialized
+ * @param p_virt_next virtual address of the next chain page
+ * @param p_phys_next physical address of the next chain page
+ *
+ */
+static OSAL_INLINE void
+ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
+ void *p_virt_next, dma_addr_t p_phys_next)
+{
+ struct ecore_chain_next *p_next;
+ u32 size;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size);
+
+ DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
+
+ p_next->next_virt = p_virt_next;
+}
+
+/**
+ * @brief ecore_chain_get_last_elem -
+ *
+ * Returns a pointer to the last element of the chain
+ *
+ * @param p_chain
+ *
+ * @return void*
+ */
+static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
+{
+ struct ecore_chain_next *p_next = OSAL_NULL;
+ void *p_virt_addr = OSAL_NULL;
+ u32 size, last_page_idx;
+
+ if (!p_chain->p_virt_addr)
+ goto out;
+
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ size = p_chain->elem_size * p_chain->usable_per_page;
+ p_virt_addr = p_chain->p_virt_addr;
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
+ while (p_next->next_virt != p_chain->p_virt_addr) {
+ p_virt_addr = p_next->next_virt;
+ p_next =
+ (struct ecore_chain_next *)((u8 *)p_virt_addr +
+ size);
+ }
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ p_virt_addr = p_chain->p_virt_addr;
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ last_page_idx = p_chain->page_cnt - 1;
+ p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ break;
+ }
+ /* p_virt_addr points at this stage to the last page of the chain */
+ size = p_chain->elem_size * (p_chain->usable_per_page - 1);
+ p_virt_addr = ((u8 *)p_virt_addr + size);
+out:
+ return p_virt_addr;
+}
+
+/**
+ * @brief ecore_chain_set_prod - sets the producer to the given value
+ *
+ * @param p_chain
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
+ u32 prod_idx, void *p_prod_elem)
+{
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.prod_idx = (u16)prod_idx;
+ else
+ p_chain->u.chain32.prod_idx = prod_idx;
+ p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief ecore_chain_pbl_zero_mem - set chain memory to 0
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
+{
+ u32 i, page_cnt;
+
+ if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
+ return;
+
+ page_cnt = ecore_chain_get_page_cnt(p_chain);
+
+ for (i = 0; i < page_cnt; i++)
+ OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
+ ECORE_CHAIN_PAGE_SIZE);
+}
+
+#endif /* __ECORE_CHAIN_H__ */
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
new file mode 100644
index 00000000..1201c1a9
--- /dev/null
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -0,0 +1,1961 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_rt_defs.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_init_ops.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_hw.h"
+#include "ecore_dev_api.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT)
+
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT 7
+#define TM_ALIGN (1 << TM_SHIFT)
+#define TM_ELEM_SIZE 4
+
+/* ILT constants */
+/* If, for some reason, the HW P size is modified to be less than 32K,
+ * special handling is needed for CDU initialization
+ */
+#define ILT_DEFAULT_HW_P_SIZE 3
+
+#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT 0
+#define ILT_ENTRY_VALID_MASK 0x1ULL
+#define ILT_ENTRY_VALID_SHIFT 52
+#define ILT_ENTRY_IN_REGS 2
+#define ILT_REG_SIZE_IN_BYTES 4
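+
+/* Worked example: with ILT_DEFAULT_HW_P_SIZE = 3 an ILT page spans
+ * ILT_PAGE_IN_BYTES(3) = 1 << (3 + 12) = 32KB, and each ILT entry occupies
+ * ILT_ENTRY_IN_REGS * ILT_REG_SIZE_IN_BYTES = 8 bytes. Judging by the mask
+ * width, bits 0-43 hold the physical page address (presumably stored shifted
+ * right by 12, covering a 56-bit DMA space) and bit 52 is the valid flag.
+ */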
+
+/* connection context union */
+union conn_context {
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+};
+
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
+#define CONN_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+/* PF per-protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct ecore_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
+struct ecore_conn_type_cfg {
+ u32 cid_count;
+ u32 cid_start;
+ u32 cids_per_vf;
+ struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration:
+ * per connection type (protocol) resources (cids, tis, vf cids, etc.).
+ * One block is needed for the connection context (CDUC), and for each task
+ * context two blocks are needed: one for the regular task context and one
+ * for the force-load memory.
+ */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
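+
+/* Block-index layout implied by the macros above: with NUM_TASK_PF_SEGMENTS
+ * == 4, pf_blks[0] is the CDUC block, pf_blks[1..4] are CDUT_SEG_BLK(0..3)
+ * (working task memory) and pf_blks[5..8] are CDUT_FL_SEG_BLK(0..3, PF)
+ * (force-load memory) - ILT_CLI_PF_BLOCKS == 9 blocks in total.
+ */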
+
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct ecore_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+ u32 dynamic_line_cnt;
+};
+
+struct ecore_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
+};
+
+/* Per Path -
+ * ILT shadow table
+ * Protocol acquired CID lists
+ * PF start line in ILT
+ */
+struct ecore_dma_mem {
+ dma_addr_t p_phys;
+ void *p_virt;
+ osal_size_t size;
+};
+
+#define MAP_WORD_SIZE sizeof(unsigned long)
+#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
+
+struct ecore_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct ecore_cxt_mngr {
+ /* Per protocol configuration */
+ struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];
+
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+
+ /* Acquired CIDs */
+ struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ /* ILT shadow table */
+ struct ecore_dma_mem *ilt_shadow;
+ u32 pf_start_line;
+
+ /* SRC T2 */
+ struct ecore_dma_mem *t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
+};
+
+/* check if resources/configuration is required according to protocol type */
+static OSAL_INLINE bool src_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_TOE;
+}
+
+static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_TOE;
+}
+
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct ecore_cdu_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_cdu_iids *iids)
+{
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Searcher block configuration */
+struct ecore_src_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_src_iids *iids)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (!src_proto(i))
+ continue;
+
+ iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Timers block configuration */
+struct ecore_tm_iids {
+ u32 pf_cids;
+ u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
+ u32 pf_tids_total;
+ u32 per_vf_cids;
+ u32 per_vf_tids;
+};
+
+static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_tm_iids *iids)
+{
+ u32 i, j;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+ if (tm_cid_proto(i)) {
+ iids->pf_cids += p_cfg->cid_count;
+ iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+ }
+
+ iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
+ iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
+ iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
+
+ for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+ iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
+ iids->pf_tids_total += iids->pf_tids[j];
+ }
+}
+
+void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_tid_seg *segs;
+ u32 vf_cids = 0, type, j;
+ u32 vf_tids = 0;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->cids += p_mngr->conn_cfg[type].cid_count;
+ vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+ segs = p_mngr->conn_cfg[type].tid_seg;
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->tids += segs[j].count;
+
+ /* The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+
+ iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->tids += vf_tids * p_mngr->vf_count;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+ iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
+ u32 seg)
+{
+ struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ /* Find the protocol with tid count > 0 for this segment.
+ * Note: there can only be one and this is already validated.
+ */
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+ return &p_cfg->conn_cfg[i].tid_seg[seg];
+ }
+ return OSAL_NULL;
+}
+
+/* set the iids (cid/tid) count per protocol */
+void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_count, u32 vf_cid_cnt)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+ p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
+ p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
+}
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid)
+{
+ if (vf_cid)
+ *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+ return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+}
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+static u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ u32 cnt = 0;
+ int i;
+
+ for (i = 0; i < TASK_SEGMENTS; i++)
+ cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+ return cnt;
+}
+
+static OSAL_INLINE void
+ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ p_seg->count = count;
+ p_seg->has_fl_mem = has_fl;
+ p_seg->type = seg_type;
+}
+
+/* the *p_line parameter must be either 0 for the first invocation or the
+ * value returned in the previous invocation.
+ */
+static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
+ struct ecore_ilt_cli_blk *p_blk,
+ u32 start_line,
+ u32 total_size, u32 elem_size)
+{
+ u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ /* verify this is called only once for each block */
+ if (p_blk->total_size)
+ return;
+
+ p_blk->total_size = total_size;
+ p_blk->real_size_in_page = 0;
+ if (elem_size)
+ p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+ p_blk->start_line = start_line;
+}
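+
+/* Sizing example (numbers are illustrative): with a 32K ILT page and
+ * elem_size == 320, real_size_in_page == (32768 / 320) * 320 == 32640,
+ * i.e. 102 whole elements fit per page and 128 bytes are wasted.
+ */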
+
+static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
+ struct ecore_ilt_client_cfg *p_cli,
+ struct ecore_ilt_cli_blk *p_blk,
+ u32 *p_line, enum ilt_clients client_id)
+{
+ if (!p_blk->total_size)
+ return;
+
+ if (!p_cli->active)
+ p_cli->first.val = *p_line;
+
+ p_cli->active = true;
+ *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+ p_cli->last.val = *p_line - 1;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+ client_id, p_cli->first.val, p_cli->last.val,
+ p_blk->total_size, p_blk->real_size_in_page,
+ p_blk->start_line);
+}
+
+static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+ struct ecore_ilt_client_cfg *p_cli;
+ u32 lines_to_skip = 0;
+ u32 cxts_per_p;
+
+ /* TBD MK: ILT code should be simplified once PROTO enum is changed */
+
+ if (ilt_client == ILT_CLI_CDUC) {
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+ cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+ (u32)CONN_CXT_SIZE(p_hwfn);
+
+ lines_to_skip = cid_count / cxts_per_p;
+ }
+
+ return lines_to_skip;
+}
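+
+/* Example (illustrative): assuming a 32K CDUC page and a 320-byte connection
+ * context, cxts_per_p == 102; 1024 RoCE cids would then skip
+ * 1024 / 102 == 10 lines, which are left for dynamic (on-demand) allocation.
+ */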
+
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 curr_line, total, i, task_size, line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ struct ecore_cdu_iids cdu_iids;
+ struct ecore_src_iids src_iids;
+ struct ecore_qm_iids qm_iids;
+ struct ecore_tm_iids tm_iids;
+ struct ecore_tid_seg *p_seg;
+
+ OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
+ OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+ OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+
+ p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+ p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+ /* CDUC */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+
+ curr_line = p_mngr->pf_start_line;
+
+ /* CDUC PF */
+ p_cli->pf_total_lines = 0;
+
+ /* get the counters for the CDUC and QM clients */
+ ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
+
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+ total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
+ ILT_CLI_CDUC);
+
+ /* CDUC VF */
+ p_blk = &p_cli->vf_blks[CDUC_BLK];
+ total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUC);
+
+ /* CDUT PF */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli->first.val = curr_line;
+
+ /* first the 'working' task memory */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+
+ /* next the 'init' task memory (forced load memory) */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+ if (!p_seg->has_fl_mem) {
+ /* The segment is active (total size of 'working'
+ * memory is > 0) but has no FL (forced-load, Init)
+ * memory. Thus:
+ *
+ * 1. The total-size in the corresponding FL block of
+ * the ILT client is set to 0 - no ILT lines are
+ * provisioned and no ILT memory allocated.
+ *
+ * 2. The start-line of said block is set to the
+ * start line of the matching working memory
+ * block in the ILT client. This is later used to
+ * configure the CDU segment offset registers and
+ * results in FL commands for TIDs of this
+ * segment behaving as regular load commands
+ * (loading TIDs from the working memory).
+ */
+ line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ continue;
+ }
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+ /* CDUT VF */
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+ if (p_seg && p_seg->count) {
+ /* Strictly speaking we need to iterate over all VF
+ * task segment types, but a VF has only 1 segment
+ */
+
+ /* 'working' memory */
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ /* 'init' memory */
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ if (!p_seg->has_fl_mem) {
+ /* see comment above */
+ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ } else {
+ task_size = p_mngr->task_type_size[p_seg->type];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total, task_size);
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->vf_total_lines = curr_line -
+ p_cli->vf_blks[0].start_line;
+
+ /* Now for the rest of the VFs */
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ }
+
+ /* QM */
+ p_cli = &p_mngr->clients[ILT_CLI_QM];
+ p_blk = &p_cli->pf_blks[0];
+
+ ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+ total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ qm_iids.vf_cids, qm_iids.tids,
+ p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_vf_pqs);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
+ " num_vf_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
+ p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
+ QM_PQ_ELEMENT_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ /* SRC */
+ p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ ecore_cxt_src_iids(p_mngr, &src_iids);
+
+ /* Both the PF and VFs searcher connections are stored in the per PF
+ * database. Thus sum the PF searcher cids and all the VFs searcher
+ * cids.
+ */
+ total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (total) {
+ u32 local_max = OSAL_MAX_T(u32, total,
+ SRC_MIN_NUM_ELEMS);
+
+ total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
+
+ p_blk = &p_cli->pf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * sizeof(struct src_ent),
+ sizeof(struct src_ent));
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM PF */
+ p_cli = &p_mngr->clients[ILT_CLI_TM];
+ ecore_cxt_tm_iids(p_mngr, &tm_iids);
+ total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+ if (total) {
+ p_blk = &p_cli->pf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM VF */
+ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+ if (total) {
+ p_blk = &p_cli->vf_blks[0];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ }
+ }
+
+ if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+ RESC_NUM(p_hwfn, ECORE_ILT)) {
+ DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+ curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ if (!p_mngr->t2)
+ return;
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++)
+ if (p_mngr->t2[i].p_virt)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_mngr->t2[i].p_virt,
+ p_mngr->t2[i].p_phys,
+ p_mngr->t2[i].size);
+
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
+ p_mngr->t2 = OSAL_NULL;
+}
+
+static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_num, total_size, ent_per_page, psz, i;
+ struct ecore_ilt_client_cfg *p_src;
+ struct ecore_src_iids src_iids;
+ struct ecore_dma_mem *p_t2;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+
+ /* if the SRC ILT client is inactive - there are no connections
+ * requiring the searcher; leave.
+ */
+ p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+ if (!p_src->active)
+ return ECORE_SUCCESS;
+
+ ecore_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ total_size = conn_num * sizeof(struct src_ent);
+
+ /* use the same page size as the SRC ILT client */
+ psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+ p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+
+ /* allocate t2 */
+ p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ p_mngr->t2_num_pages *
+ sizeof(struct ecore_dma_mem));
+ if (!p_mngr->t2) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
+ rc = ECORE_NOMEM;
+ goto t2_fail;
+ }
+
+ /* allocate t2 pages */
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 size = OSAL_MIN_T(u32, total_size, psz);
+ void **p_virt = &p_mngr->t2[i].p_virt;
+
+ *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_mngr->t2[i].p_phys, size);
+ if (!p_mngr->t2[i].p_virt) {
+ rc = ECORE_NOMEM;
+ goto t2_fail;
+ }
+ OSAL_MEM_ZERO(*p_virt, size);
+ p_mngr->t2[i].size = size;
+ total_size -= size;
+ }
+
+ /* Set the t2 pointers */
+
+ /* entries per page - must be a power of two */
+ ent_per_page = psz / sizeof(struct src_ent);
+
+ p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;
+
+ p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+ p_mngr->last_free = (u64)p_t2->p_phys +
+ ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
+ struct src_ent *entries = p_mngr->t2[i].p_virt;
+ u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
+ u32 j;
+
+ for (j = 0; j < ent_num - 1; j++) {
+ val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+ entries[j].next = OSAL_CPU_TO_BE64(val);
+ }
+
+ if (i < p_mngr->t2_num_pages - 1)
+ val = (u64)p_mngr->t2[i + 1].p_phys;
+ else
+ val = 0;
+ entries[j].next = OSAL_CPU_TO_BE64(val);
+
+ conn_num -= ent_per_page;
+ }
+
+ return ECORE_SUCCESS;
+
+t2_fail:
+ ecore_cxt_src_t2_free(p_hwfn);
+ return rc;
+}
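+
+/* The resulting T2 layout is a singly linked free list of src_ent elements:
+ * within a page each entry's 'next' holds the physical address of the
+ * following entry, the last entry of a page points to the first entry of the
+ * next page, and the final entry's 'next' is 0. first_free/last_free cache
+ * the physical addresses of the list head and tail for the SRC block.
+ */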
+
+/* Total number of ILT lines used by this PF */
+static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
+{
+ u32 size = 0;
+ u32 i;
+
+ for (i = 0; i < ILT_CLI_MAX; i++)
+ if (ilt_clients[i].active)
+ size += (ilt_clients[i].last.val -
+ ilt_clients[i].first.val + 1);
+
+ return size;
+}
+
+static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 ilt_size, i;
+
+ ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
+
+ for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+ struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+ if (p_dma->p_virt)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_dma->p_virt,
+ p_dma->p_phys, p_dma->size);
+ p_dma->p_virt = OSAL_NULL;
+ }
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+}
+
+static enum _ecore_status_t
+ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client, u32 start_line_offset)
+{
+ struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left, lines_to_skip = 0;
+
+ /* Special handling for RoCE that supports dynamic allocation */
+ if (ilt_client == ILT_CLI_CDUT)
+ return ECORE_SUCCESS;
+
+ lines_to_skip = p_blk->dynamic_line_cnt;
+
+ if (!p_blk->total_size)
+ return ECORE_SUCCESS;
+
+ sz_left = p_blk->total_size;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+ line = p_blk->start_line + start_line_offset -
+ p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+
+ for (; lines; lines--) {
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 size;
+
+ size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
+
+/* @DPDK */
+#define ILT_BLOCK_ALIGN_SIZE 0x1000
+ p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
+ &p_phys, size,
+ ILT_BLOCK_ALIGN_SIZE);
+ if (!p_virt)
+ return ECORE_NOMEM;
+ OSAL_MEM_ZERO(p_virt, size);
+
+ ilt_shadow[line].p_phys = p_phys;
+ ilt_shadow[line].p_virt = p_virt;
+ ilt_shadow[line].size = size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "ILT shadow: Line [%d] Physical 0x%" PRIx64
+ " Virtual %p Size %d\n",
+ line, (u64)p_phys, p_virt, size);
+
+ sz_left -= size;
+ line++;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_ilt_client_cfg *clients = p_mngr->clients;
+ struct ecore_ilt_cli_blk *p_blk;
+ enum _ecore_status_t rc;
+ u32 size, i, j, k;
+
+ size = ecore_cxt_ilt_shadow_size(clients);
+ p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ size * sizeof(struct ecore_dma_mem));
+
+ if (!p_mngr->ilt_shadow) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate ilt shadow table\n");
+ rc = ECORE_NOMEM;
+ goto ilt_shadow_fail;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Allocated 0x%x bytes for ilt shadow\n",
+ (u32)(size * sizeof(struct ecore_dma_mem)));
+
+ for (i = 0; i < ILT_CLI_MAX; i++) {
+ if (!clients[i].active)
+ continue;
+
+ for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+ p_blk = &clients[i].pf_blks[j];
+ rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+ if (rc != ECORE_SUCCESS)
+ goto ilt_shadow_fail;
+ }
+ for (k = 0; k < p_mngr->vf_count; k++) {
+ for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+ u32 lines = clients[i].vf_total_lines * k;
+
+ p_blk = &clients[i].vf_blks[j];
+ rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
+ i, lines);
+ if (rc != ECORE_SUCCESS)
+ goto ilt_shadow_fail;
+ }
+ }
+ }
+
+ return ECORE_SUCCESS;
+
+ilt_shadow_fail:
+ ecore_ilt_shadow_free(p_hwfn);
+ return rc;
+}
+
+static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].max_count = 0;
+ p_mngr->acquired[type].start_cid = 0;
+ }
+}
+
+static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0;
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+ u32 size;
+
+ if (cid_cnt == 0)
+ continue;
+
+ size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
+ p_mngr->acquired[type].cid_map = OSAL_ZALLOC(p_hwfn->p_dev,
+ GFP_KERNEL, size);
+ if (!p_mngr->acquired[type].cid_map)
+ goto cid_map_fail;
+
+ p_mngr->acquired[type].max_count = cid_cnt;
+ p_mngr->acquired[type].start_cid = start_cid;
+
+ p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_mngr->acquired[type].start_cid,
+ p_mngr->acquired[type].max_count);
+ start_cid += cid_cnt;
+ }
+
+ return ECORE_SUCCESS;
+
+cid_map_fail:
+ ecore_cid_map_free(p_hwfn);
+ return ECORE_NOMEM;
+}
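+
+/* Sizing example: on a 64-bit build (BITS_PER_MAP_WORD == 64), a protocol
+ * with cid_cnt == 100 gets DIV_ROUND_UP(100, 64) == 2 map words, i.e. a
+ * 16-byte bitmap tracking relative cids 0..99 on top of its start_cid.
+ */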
+
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr;
+ u32 i;
+
+ p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
+ if (!p_mngr) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_cxt_mngr'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Initialize ILT client registers */
+ p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ p_mngr->clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+ p_mngr->clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+ p_mngr->clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+ p_mngr->clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+ p_mngr->clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+ p_mngr->clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+ p_mngr->clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+ /* default ILT page size for all clients is 32K */
+ for (i = 0; i < ILT_CLI_MAX; i++)
+ p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+ /* Initialize task sizes */
+ p_mngr->task_type_size[0] = 512; /* @DPDK */
+ p_mngr->task_type_size[1] = 128; /* @DPDK */
+
+ p_mngr->vf_count = p_hwfn->p_dev->sriov_info.total_vfs;
+ /* Set the cxt manager pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t rc;
+
+ /* Allocate the ILT shadow table */
+ rc = ecore_ilt_shadow_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate the T2 table */
+ rc = ecore_cxt_src_t2_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
+ goto tables_alloc_fail;
+ }
+
+ /* Allocate and initialize the acquired cids bitmaps */
+ rc = ecore_cid_map_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
+ goto tables_alloc_fail;
+ }
+
+ return ECORE_SUCCESS;
+
+tables_alloc_fail:
+ ecore_cxt_mngr_free(p_hwfn);
+ return rc;
+}
+
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_cxt_mngr)
+ return;
+
+ ecore_cid_map_free(p_hwfn);
+ ecore_cxt_src_t2_free(p_hwfn);
+ ecore_ilt_shadow_free(p_hwfn);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
+
+ p_hwfn->p_cxt_mngr = OSAL_NULL;
+}
+
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ int type;
+
+ /* Reset acquired cids */
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+ u32 i;
+
+ if (cid_cnt == 0)
+ continue;
+
+ for (i = 0; i < DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD); i++)
+ p_mngr->acquired[type].cid_map[i] = 0;
+ }
+}
+
+/* HW initialization helper (per Block, per phase) */
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+ CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+ CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+ CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+ CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE1_NCIB_SHIFT)
+
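+/* Worked example for the derivation below (numbers are illustrative): with a
+ * 32K CDUC page and a 320-byte connection context, elems_per_page == 102 and
+ * block_waste == 32768 - 102 * 320 == 128; these become the NCIB and
+ * BLOCK_WASTE fields. CDUT context size and waste are programmed in units of
+ * 8 bytes, hence the '>> 3' shifts.
+ */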
+static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
+{
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+ /* CDUC - connection configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+ cxt_size = CONN_CXT_SIZE(p_hwfn);
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+ SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+ SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-0 tasks configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-1 tasks configuration */
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK 0x1
+#define CDU_SEG_REG_OFFSET_SHIFT 0
+#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_tid_seg *p_seg;
+ u32 cdu_seg_params, offset;
+ int i;
+
+ static const u32 rt_type_offset_arr[] = {
+ CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ static const u32 rt_type_offset_fl_arr[] = {
+ CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+ /* There are initializations only for CDUT during the PF phase */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ /* Per-segment 'working' and FL type/offset registers */
+ p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg)
+ continue;
+
+ /* Note: start_line is already adjusted for the CDU
+ * segment register granularity, so we just need to
+ * divide. Adjustment is implicit as we assume ILT
+ * Page size is larger than 32K!
+ */
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+ }
+}
+
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct ecore_qm_iids iids;
+
+ OSAL_MEM_ZERO(&iids, sizeof(iids));
+ ecore_cxt_qm_iids(p_hwfn, &iids);
+
+ ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
+ p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
+ p_hwfn->first_on_engine,
+ iids.cids, iids.vf_cids, iids.tids,
+ qm_info->start_pq,
+ qm_info->num_pqs - qm_info->num_vf_pqs,
+ qm_info->num_vf_pqs,
+ qm_info->start_vport,
+ qm_info->num_vports, qm_info->pf_wfq,
+ qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params,
+ p_hwfn->qm_info.qm_vport_params);
+}
+
+/* CM PF */
+static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ union ecore_qm_pq_params pq_params;
+ u16 pq;
+
+ /* XCM pure-LB queue */
+ OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
+
+ return ECORE_SUCCESS;
+}
+
+/* DQ PF */
+static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+ /* Connection types 6 & 7 are not in use, yet they must be configured
+ * with the highest possible connection limits. Not configuring them
+ * means the defaults will be used, and with a large number of cids a
+ * bug may occur if the defaults are smaller than dq_pf_max_cid /
+ * dq_vf_max_cid.
+ */
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
+}
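+
+/* Example: DQ_RANGE_SHIFT == 4, so a protocol with cid_count == 64
+ * contributes 64 >> 4 == 4 to the running dq_pf_max_cid; each DORQ register
+ * therefore holds a cumulative upper bound expressed in 16-cid units.
+ */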
+
+static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *ilt_clients;
+ int i;
+
+ ilt_clients = p_hwfn->p_cxt_mngr->clients;
+ for (i = 0; i < ILT_CLI_MAX; i++) {
+ if (!ilt_clients[i].active)
+ continue;
+
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].first.reg,
+ ilt_clients[i].first.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].last.reg, ilt_clients[i].last.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].p_size.reg,
+ ilt_clients[i].p_size.val);
+ }
+}
+
+static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *p_cli;
+ u32 blk_factor;
+
+ /* For simplicity we set the 'block' to be an ILT page */
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_hwfn->hw_info.first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_hwfn->hw_info.first_vf_in_pf +
+ p_hwfn->p_dev->sriov_info.total_vfs);
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+ blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+}
+
+/* ILT (PSWRQ2) PF */
+static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ilt_client_cfg *clients;
+ struct ecore_cxt_mngr *p_mngr;
+ struct ecore_dma_mem *p_shdw;
+ u32 line, rt_offst, i;
+
+ ecore_ilt_bounds_init(p_hwfn);
+ ecore_ilt_vf_bounds_init(p_hwfn);
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
+ clients = p_hwfn->p_cxt_mngr->clients;
+
+ for (i = 0; i < ILT_CLI_MAX; i++) {
+ if (!clients[i].active)
+ continue;
+
+ /* The client's first val and the RT array are absolute;
+ * ILT shadow lines are relative.
+ */
+ line = clients[i].first.val - p_mngr->pf_start_line;
+ rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+ for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+ line++, rt_offst += ILT_ENTRY_IN_REGS) {
+ u64 ilt_hw_entry = 0;
+
+ /* p_virt could be OSAL_NULL in case of dynamic
+ * allocation
+ */
+ if (p_shdw[line].p_virt != OSAL_NULL) {
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_shdw[line].p_phys >> 12));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Setting RT[0x%08x] from"
+ " ILT[0x%08x] [Client is %d] to"
+ " Physical addr: 0x%" PRIx64 "\n",
+ rt_offst, line, i,
+ (u64)(p_shdw[line].p_phys >> 12));
+ }
+
+ STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+ }
+ }
+}
+
+/* SRC (Searcher) PF */
+static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rounded_conn_num, conn_num, conn_max;
+ struct ecore_src_iids src_iids;
+
+ OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+ ecore_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (!conn_num)
+ return;
+
+ conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS);
+ rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max);
+
+ STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+ STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+ OSAL_LOG2(rounded_conn_num));
+
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->first_free);
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->last_free);
+}
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT 0
+#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT 25
+#define TM_CFG_PARENT_PF_MASK 0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT 30
+#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
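+
+/* A connection cfg_word is thus packed as NUM_IDS (bits 0..15),
+ * PRE_SCAN_OFFSET (bits 16..24), PARENT_PF (bits 25..27) and
+ * CID_PRE_SCAN_ROWS (bits 30..38); task cfg_words reuse bits 30+ for
+ * TID_OFFSET (bits 30..48) and TID_PRE_SCAN_ROWS (bits 49..57).
+ */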
+
+static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 active_seg_mask = 0, tm_offset, rt_reg;
+ struct ecore_tm_iids tm_iids;
+ u64 cfg_word;
+ u8 i;
+
+ OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+ ecore_cxt_tm_iids(p_mngr, &tm_iids);
+
+ /* @@@TBD No pre-scan for now */
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->hw_info.first_vf_in_pf + i);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+ /* enable scan */
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+ tm_iids.pf_cids ? 0x1 : 0x0);
+
+ /* @@@TBD how to enable the scan for the VFs */
+
+ tm_offset = tm_iids.per_vf_cids;
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->hw_info.first_vf_in_pf + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ tm_offset = tm_iids.pf_cids;
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->p_dev) +
+ p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+ tm_offset += tm_iids.pf_tids[i];
+ }
+
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+ /* @@@TBD how to enable the scan for the VFs */
+}
+
+static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
+{
+}
+
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
+{
+ /* CDU configuration */
+ ecore_cdu_init_common(p_hwfn);
+ ecore_prs_init_common(p_hwfn);
+}
+
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
+{
+ ecore_qm_init_pf(p_hwfn);
+ ecore_cm_init_pf(p_hwfn);
+ ecore_dq_init_pf(p_hwfn);
+ ecore_cdu_init_pf(p_hwfn);
+ ecore_ilt_init_pf(p_hwfn);
+ ecore_src_init_pf(p_hwfn);
+ ecore_tm_init_pf(p_hwfn);
+}
+
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rel_cid;
+
+ if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+ return ECORE_INVAL;
+ }
+
+ rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_mngr->acquired[type].cid_map,
+ p_mngr->acquired[type].max_count);
+
+ if (rel_cid >= p_mngr->acquired[type].max_count) {
+ DP_NOTICE(p_hwfn, false, "no CID available for protocol %d",
+ type);
+ return ECORE_NORESOURCES;
+ }
+
+ OSAL_SET_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+
+ *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+
+ return ECORE_SUCCESS;
+}
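+
+/* Typical usage (sketch, error handling elided):
+ *
+ *	u32 cid;
+ *
+ *	if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid) ==
+ *	    ECORE_SUCCESS) {
+ *		// ... use cid ...
+ *		ecore_cxt_release_cid(p_hwfn, cid);
+ *	}
+ */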
+
+static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
+ u32 cid, enum protocol_type *p_type)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
+ enum protocol_type p;
+ u32 rel_cid;
+
+ /* Iterate over protocols and find matching cid range */
+ for (p = 0; p < MAX_CONN_TYPES; p++) {
+ p_map = &p_mngr->acquired[p];
+
+ if (!p_map->cid_map)
+ continue;
+ if (cid >= p_map->start_cid &&
+ cid < p_map->start_cid + p_map->max_count) {
+ break;
+ }
+ }
+ *p_type = p;
+
+ if (p == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid);
+ return false;
+ }
+ rel_cid = cid - p_map->start_cid;
+ if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) {
+ DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid);
+ return false;
+ }
+ return true;
+}
+
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ enum protocol_type type;
+ bool b_acquired;
+ u32 rel_cid;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, &type);
+
+ if (!b_acquired)
+ return;
+
+ rel_cid = cid - p_mngr->acquired[type].start_cid;
+ OSAL_CLEAR_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+}
+
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_info *p_info)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+ enum protocol_type type;
+ bool b_acquired;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+ if (!b_acquired)
+ return ECORE_INVAL;
+
+ /* set the protocol type */
+ p_info->type = type;
+
+ /* compute context virtual pointer */
+ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+ conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+ cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+ line = p_info->iid / cxts_per_p;
+
+ /* Make sure context is allocated (dynamic allocation) */
+ if (!p_mngr->ilt_shadow[line].p_virt)
+ return ECORE_INVAL;
+
+ p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
+ p_info->iid % cxts_per_p * conn_cxt_size;
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
+
+ return ECORE_SUCCESS;
+}
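+
+/* Mapping example (illustrative): assuming a 32K CDUC page and a 320-byte
+ * connection context, cxts_per_p == 102, so iid 250 resolves to ILT shadow
+ * line 250 / 102 == 2 at byte offset (250 % 102) * 320 == 14720 within that
+ * page.
+ */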
+
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
+{
+ /* Set the number of required CORE connections */
+ u32 core_cids = 1; /* SPQ */
+
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ {
+ struct ecore_eth_pf_params *p_params =
+ &p_hwfn->pf_params.eth_pf_params;
+
+ ecore_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_ETH,
+ p_params->num_cons, 1); /* FIXME VF count... */
+
+ break;
+ }
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_tid_mem *p_info)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 proto, seg, total_lines, i, shadow_line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_fl_seg;
+ struct ecore_tid_seg *p_seg_info;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ default:
+ return ECORE_INVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return ECORE_INVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+ if (!p_seg_info->has_fl_mem)
+ return ECORE_INVAL;
+
+ p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+ p_fl_seg->real_size_in_page);
+
+ for (i = 0; i < total_lines; i++) {
+ shadow_line = i + p_fl_seg->start_line -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+ }
+ p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+ p_fl_seg->real_size_in_page;
+ p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+ p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+ p_info->tid_size;
+
+ return ECORE_SUCCESS;
+}
+
+/* This function is very RoCE oriented, if another protocol in the future
+ * will want this feature we'll need to modify the function to be more generic
+ */
+static enum _ecore_status_t
+ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 start_iid, u32 count)
+{
+ u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+ u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ u32 end_iid = start_iid + count;
+ struct ecore_ptt *p_ptt;
+ u64 ilt_hw_entry = 0;
+ u32 i;
+
+ if (elem_type == ECORE_ELEM_CXT) {
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ start_line = p_blk->start_line + (start_iid / elems_per_p);
+ end_line = p_blk->start_line + (end_iid / elems_per_p);
+ if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+ end_line--;
+
+ shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+ shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+ return ECORE_TIMEOUT;
+ }
+
+ for (i = shadow_start_line; i < shadow_end_line; i++) {
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ continue;
+
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+ ILT_ENTRY_IN_REGS);
+
+ ecore_wr(p_hwfn, p_ptt, reg_offset, U64_LO(ilt_hw_entry));
+ ecore_wr(p_hwfn, p_ptt, reg_offset + ILT_REG_SIZE_IN_BYTES,
+ U64_HI(ilt_hw_entry));
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto)
+{
+ enum _ecore_status_t rc;
+ u32 cid;
+
+ /* Free Connection CXT */
+ rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT,
+ ecore_cxt_get_proto_cid_start(p_hwfn,
+ proto),
+ ecore_cxt_get_proto_cid_count(p_hwfn,
+ proto,
+ &cid));
+
+ if (rc)
+ return rc;
+
+ /* Free Task CXT */
+ rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
+ ecore_cxt_get_proto_tid_count(p_hwfn,
+ proto));
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+ u32 tid,
+ u8 ctx_type, void **pp_task_ctx)
+{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_seg;
+ struct ecore_tid_seg *p_seg_info;
+ u32 proto, seg;
+ u32 total_lines;
+ u32 tid_size, ilt_idx;
+ u32 num_tids_per_block;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ default:
+ return ECORE_INVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return ECORE_INVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ if (ctx_type == ECORE_CTX_WORKING_MEM) {
+ p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+ } else if (ctx_type == ECORE_CTX_FL_MEM) {
+ if (!p_seg_info->has_fl_mem)
+ return ECORE_INVAL;
+ p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ } else {
+ return ECORE_INVAL;
+ }
+ total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
+ tid_size = p_mngr->task_type_size[p_seg_info->type];
+ num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+ if (total_lines < tid / num_tids_per_block)
+ return ECORE_INVAL;
+
+ ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+ p_mngr->pf_start_line;
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+ (tid % num_tids_per_block) * tid_size;
+
+ return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
new file mode 100644
index 00000000..1ac95f98
--- /dev/null
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _ECORE_CID_
+#define _ECORE_CID_
+
+#include "ecore_hsi_common.h"
+#include "ecore_proto_if.h"
+#include "ecore_cxt_api.h"
+
+enum ecore_cxt_elem_type {
+ ECORE_ELEM_CXT,
+ ECORE_ELEM_TASK
+};
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid);
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type);
+
+/**
+ * @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration
+ *
+ * @param p_hwfn
+ * @param iids [out], a structure holding all the counters
+ */
+void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids);
+
+/**
+ * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_set_proto_cid_count - Set the max cids per protocol for cxt
+ * init
+ *
+ * @param p_hwfn
+ * @param type
+ * @param cid_cnt - number of pf cids
+ * @param vf_cid_cnt - number of vf cids
+ */
+void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_cnt, u32 vf_cid_cnt);
+/**
+ * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired
+ * map
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per
+ * path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+* @brief ecore_cxt_release_cid - Release a cid
+*
+* @param p_hwfn
+* @param cid
+*/
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief ecore_cxt_free_proto_ilt - function frees ilt pages
+ * associated with the protocol passed.
+ *
+ * @param p_hwfn
+ * @param proto
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto);
+
+#define ECORE_CTX_WORKING_MEM 0
+#define ECORE_CTX_FL_MEM 1
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+ u32 tid,
+ u8 ctx_type, void **task_ctx);
+
+#endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
new file mode 100644
index 00000000..d98dddb2
--- /dev/null
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_CXT_API_H__
+#define __ECORE_CXT_API_H__
+
+struct ecore_hwfn;
+
+struct ecore_cxt_info {
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
+};
+
+#define MAX_TID_BLOCKS 512
+struct ecore_tid_mem {
+ u32 tid_size;
+ u32 num_tids_per_block;
+ u32 waste;
+ u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
+};
+
+static OSAL_INLINE void *get_task_mem(struct ecore_tid_mem *info, u32 tid)
+{
+ /* note: waste is superfluous */
+ return (void *)(info->blocks[tid / info->num_tids_per_block] +
+ (tid % info->num_tids_per_block) * info->tid_size);
+
+ /* more elaborate alternative with no modulo
+ * u32 mask = info->tid_size * info->num_tids_per_block +
+ * info->waste - 1;
+ * u32 index = tid / info->num_tids_per_block;
+ * u32 offset = tid * info->tid_size + index * info->waste;
+ * return (void *)(blocks[index] + (offset & mask));
+ */
+}
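+
+/* Illustrative use (hypothetical values): with tid_size = 64 and
+ * num_tids_per_block = 64 (i.e. a 4kB block), get_task_mem(info, 130)
+ * returns blocks[130 / 64] + (130 % 64) * 64 = blocks[2] + 128.
+ */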
+
+/**
+* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+*
+* @param p_hwfn
+* @param type
+* @param p_cid
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
+/**
+* @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
+*
+* @param p_hwfn
+* @param p_info in/out
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_info *p_info);
+
+/**
+* @brief ecore_cxt_get_tid_mem_info
+*
+* @param p_hwfn
+* @param p_info
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_tid_mem *p_info);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
new file mode 100644
index 00000000..6a966cb9
--- /dev/null
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -0,0 +1,887 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dcbx.h"
+#include "ecore_cxt.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+
+#define ECORE_DCBX_MAX_MIB_READ_TRY (100)
+#define ECORE_MAX_PFC_PRIORITIES 8
+#define ECORE_ETH_TYPE_DEFAULT (0)
+
+#define ECORE_DCBX_INVALID_PRIORITY 0xFF
+
+/* Get the Traffic Class for a priority from the priority-to-TC table;
+ * each priority is encoded as a 4-bit nibble holding its traffic class.
+ */
+#define ECORE_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
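+/* Example: with prio_tc_tbl = 0x01234567, priority 0 reads the top
+ * nibble ((0x01234567 >> 28) & 0x7 = 0) and priority 7 the bottom one
+ * ((0x01234567 >> 0) & 0x7 = 7).
+ */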
+
+static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+ return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE) ? true : false;
+}
+
+static bool ecore_dcbx_app_port(u32 app_info_bitmap)
+{
+ return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT) ? true : false;
+}
+
+static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return (ecore_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == ECORE_ETH_TYPE_DEFAULT) ? true : false;
+}
+
+static bool ecore_dcbx_enabled(u32 dcbx_cfg_bitmap)
+{
+ return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_DISABLED) ? false : true;
+}
+
+static bool ecore_dcbx_cee(u32 dcbx_cfg_bitmap)
+{
+ return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE) ? true : false;
+}
+
+static bool ecore_dcbx_ieee(u32 dcbx_cfg_bitmap)
+{
+ return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE) ? true : false;
+}
+
+/* @@@TBD A0 Eagle workaround */
+void ecore_dcbx_eagle_workaround(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool set_to_pfc)
+{
+ if (!ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
+ return;
+
+ ecore_wr(p_hwfn, p_ptt,
+ YSEM_REG_FAST_MEMORY + 0x20000 /* RAM in FASTMEM */ +
+ YSTORM_FLOW_CONTROL_MODE_OFFSET,
+ set_to_pfc ? flow_ctrl_pfc : flow_ctrl_pause);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_FLOWCTRL_MODE,
+ EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE);
+}
+
+static void
+ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_results *p_data)
+{
+ struct ecore_hw_info *p_info = &p_hwfn->hw_info;
+ enum dcbx_protocol_type id;
+ bool enable, update;
+ u8 prio, tc, size;
+ const char *name; /* @DPDK */
+ int i;
+
+ size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
+
+ DP_INFO(p_hwfn, "DCBX negotiated: %d\n", p_data->dcbx_enabled);
+
+ for (i = 0; i < size; i++) {
+ id = ecore_dcbx_app_update[i].id;
+ name = ecore_dcbx_app_update[i].name;
+
+ enable = p_data->arr[id].enable;
+ update = p_data->arr[id].update;
+ tc = p_data->arr[id].tc;
+ prio = p_data->arr[id].priority;
+
+ DP_INFO(p_hwfn,
+ "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+ name, update, enable, prio, tc, p_info->num_tc);
+ }
+}
+
+static void
+ecore_dcbx_set_pf_tcs(struct ecore_hw_info *p_info,
+ u8 tc, enum ecore_pci_personality personality)
+{
+ /* QM reconf data */
+ if (p_info->personality == personality) {
+ if (personality == ECORE_PCI_ETH)
+ p_info->non_offload_tc = tc;
+ else
+ p_info->offload_tc = tc;
+ }
+}
+
+void
+ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
+ struct ecore_hw_info *p_info,
+ bool enable, bool update, u8 prio, u8 tc,
+ enum dcbx_protocol_type type,
+ enum ecore_pci_personality personality)
+{
+ /* PF update ramrod data */
+ p_data->arr[type].update = update;
+ p_data->arr[type].enable = enable;
+ p_data->arr[type].priority = prio;
+ p_data->arr[type].tc = tc;
+
+ ecore_dcbx_set_pf_tcs(p_info, tc, personality);
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
+ struct ecore_hwfn *p_hwfn,
+ bool enable, bool update, u8 prio, u8 tc,
+ enum dcbx_protocol_type type)
+{
+ struct ecore_hw_info *p_info = &p_hwfn->hw_info;
+ enum ecore_pci_personality personality;
+ enum dcbx_protocol_type id;
+ const char *name; /* @DPDK */
+ u8 size;
+ int i;
+
+ size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
+
+ for (i = 0; i < size; i++) {
+ id = ecore_dcbx_app_update[i].id;
+
+ if (type != id)
+ continue;
+
+ personality = ecore_dcbx_app_update[i].personality;
+ name = ecore_dcbx_app_update[i].name;
+
+ ecore_dcbx_set_params(p_data, p_info, enable, update,
+ prio, tc, type, personality);
+ }
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_app_priority(u8 pri_bitmap, u8 *priority)
+{
+ u32 pri_mask, pri = ECORE_MAX_PFC_PRIORITIES;
+ u32 index = ECORE_MAX_PFC_PRIORITIES - 1;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Bitmap 1 corresponds to priority 0, return priority 0 */
+ if (pri_bitmap == 1) {
+ *priority = 0;
+ return rc;
+ }
+
+ /* Choose the highest priority */
+ while ((pri == ECORE_MAX_PFC_PRIORITIES) && index) {
+ pri_mask = 1 << index;
+ if (pri_bitmap & pri_mask)
+ pri = index;
+ index--;
+ }
+
+ if (pri < ECORE_MAX_PFC_PRIORITIES)
+ *priority = (u8)pri;
+ else
+ rc = ECORE_INVAL;
+
+ return rc;
+}
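+
+/* Example: pri_bitmap 0x14 has bits 2 and 4 set, so the scan from
+ * index 7 downward returns priority 4. Note the loop never tests bit 0
+ * (it stops once index reaches 0), hence the pri_bitmap == 1 special
+ * case above.
+ */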
+
+static bool
+ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
+ u32 app_prio_bitmap, u16 id, int *type)
+{
+ bool status = false;
+
+ if (ecore_dcbx_default_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ETH;
+ status = true;
+ } else {
+ DP_ERR(p_hwfn, "Unsupported protocol %d\n", id);
+ }
+
+ return status;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static enum _ecore_status_t
+ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_results *p_data,
+ struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
+ int count, bool dcbx_enabled)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 tc, priority, priority_map;
+ int i, type = -1;
+ u16 protocol_id;
+ bool enable;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Num APP entries = %d\n", count);
+
+ /* Parse APP TLV */
+ for (i = 0; i < count; i++) {
+ protocol_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PRI_MAP);
+ rc = ecore_dcbx_get_app_priority(priority_map, &priority);
+ if (rc == ECORE_INVAL) {
+ DP_ERR(p_hwfn, "Invalid priority\n");
+ return rc;
+ }
+
+ tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority);
+ if (ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ protocol_id, &type)) {
+ /* ETH always has the enable bit reset, as it gets
+ * vlan information per packet. For other protocols,
+ * it should be set according to the dcbx_enabled
+ * indication, but we only got here if there was an
+ * app tlv for the protocol, so dcbx must be enabled.
+ */
+ enable = (type == DCBX_PROTOCOL_ETH ? false : true);
+
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+ }
+ /* Update ramrod protocol data and hw_info fields
+ * with default info when corresponding APP TLV's are not detected.
+ * The enable field follows different logic for ethernet: only for
+ * ethernet should dcb be disabled by default, as its information
+ * arrives from the OS (unless an explicit app tlv was present).
+ */
+ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+ for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+ if (p_data->arr[type].update)
+ continue;
+
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/* Process the operational MIB: parse its APP TLVs to update the TC info
+ * in the hw_info structure and store the results for QM reconfiguration
+ * and the PF update ramrod command.
+ */
+static enum _ecore_status_t
+ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
+{
+ struct dcbx_app_priority_feature *p_app;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_dcbx_results data = { 0 };
+ struct dcbx_app_priority_entry *p_tbl;
+ struct dcbx_ets_feature *p_ets;
+ struct ecore_hw_info *p_info;
+ u32 pri_tc_tbl, flags;
+ bool dcbx_enabled;
+ int num_entries;
+
+ /* If the DCBx version is non-zero, then negotiation was
+ * successfully performed
+ */
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+ dcbx_enabled = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) != 0;
+
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+ p_info = &p_hwfn->hw_info;
+ num_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+ rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ num_entries, dcbx_enabled);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_info->num_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ data.pf_id = p_hwfn->rel_pf_id;
+ data.dcbx_enabled = dcbx_enabled;
+
+ ecore_dcbx_dp_protocol(p_hwfn, &data);
+
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->results, &data,
+ sizeof(struct ecore_dcbx_results));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_mib_meta_data *p_data,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 prefix_seq_num, suffix_seq_num;
+ int read_count = 0;
+
+ do {
+ if (type == ECORE_DCBX_REMOTE_LLDP_MIB) {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->mib->prefix_seq_num;
+ suffix_seq_num = p_data->mib->suffix_seq_num;
+ }
+ read_count++;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (read_count < ECORE_DCBX_MAX_MIB_READ_TRY));
+
+ if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
+ DP_ERR(p_hwfn,
+ "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ rc = ECORE_IO;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_app_prio *p_prio,
+ struct ecore_dcbx_results *p_results)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+ p_results->arr[DCBX_PROTOCOL_ETH].enable) {
+ p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Priority: eth %d\n", p_prio->eth);
+ }
+
+ return rc;
+}
+
+static void
+ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct ecore_dcbx_params *p_params)
+{
+ int i;
+
+ p_params->app_willing = ECORE_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_WILLING);
+ p_params->app_valid = ECORE_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_ENABLED);
+ p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+ p_params->app_bitmap[i] = p_tbl[i].entry;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "APP params: willing %d, valid %d\n",
+ p_params->app_willing, p_params->app_valid);
+}
+
+static void
+ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
+ u32 pfc, struct ecore_dcbx_params *p_params)
+{
+ p_params->pfc_willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->max_pfc_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc_enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+ p_params->pfc_bitmap = pfc;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "PFC params: willing %d, pfc_bitmap %d\n",
+ p_params->pfc_willing, p_params->pfc_bitmap);
+}
+
+static void
+ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct ecore_dcbx_params *p_params)
+{
+ int i;
+
+ p_params->ets_willing = ECORE_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_WILLING);
+ p_params->ets_enabled = ECORE_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_ENABLED);
+ p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ p_params->ets_pri_tc_tbl[0] = p_ets->pri_tc_tbl[0];
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "ETS params: willing %d, pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_pri_tc_tbl[0],
+ p_params->max_ets_tc);
+
+ /* The 8-bit tsa and bw data corresponding to each of the 8 TCs
+ * are encoded in a u32 array of size 2.
+ */
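+ /* Each u32 entry thus carries the 8-bit values of four TCs, e.g.
+ * entry 0 for TC0-TC3 and entry 1 for TC4-TC7 (assuming the natural
+ * packing order).
+ */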
+ for (i = 0; i < 2; i++) {
+ p_params->ets_tc_tsa_tbl[i] = p_ets->tc_tsa_tbl[i];
+ p_params->ets_tc_bw_tbl[i] = p_ets->tc_bw_tbl[i];
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct ecore_dcbx_params *p_params)
+{
+ ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+ ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_admin_params *p_local;
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct ecore_dcbx_params *p_data;
+ struct dcbx_ets_feature *p_ets;
+ u32 pfc;
+
+ p_local = &params->local;
+ p_data = &p_local->params;
+ p_app = &p_hwfn->p_dcbx_info->local_admin.features.app;
+ p_tbl = p_app->app_pri_tbl;
+ p_ets = &p_hwfn->p_dcbx_info->local_admin.features.ets;
+ pfc = p_hwfn->p_dcbx_info->local_admin.features.pfc;
+
+ ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+ p_local->valid = true;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_remote_params *p_remote;
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct ecore_dcbx_params *p_data;
+ struct dcbx_ets_feature *p_ets;
+ u32 pfc;
+
+ p_remote = &params->remote;
+ p_data = &p_remote->params;
+ p_app = &p_hwfn->p_dcbx_info->remote.features.app;
+ p_tbl = p_app->app_pri_tbl;
+ p_ets = &p_hwfn->p_dcbx_info->remote.features.ets;
+ pfc = p_hwfn->p_dcbx_info->remote.features.pfc;
+
+ ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+ p_remote->valid = true;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_operational_params *p_operational;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct ecore_dcbx_results *p_results;
+ struct ecore_dcbx_params *p_data;
+ struct dcbx_ets_feature *p_ets;
+ bool enabled, err;
+ u32 pfc, flags;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+ /* If the DCBx version is non-zero, then negotiation
+ * was successfully performed
+ */
+ p_operational = &params->operational;
+ enabled = ecore_dcbx_enabled(flags);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ return ECORE_INVAL;
+ }
+
+ p_data = &p_operational->params;
+ p_results = &p_hwfn->p_dcbx_info->results;
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pfc = p_hwfn->p_dcbx_info->operational.features.pfc;
+
+ p_operational->ieee = ecore_dcbx_ieee(flags);
+ p_operational->cee = ecore_dcbx_cee(flags);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Version support: ieee %d, cee %d\n",
+ p_operational->ieee, p_operational->cee);
+
+ ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+ ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
+ p_results);
+ err = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_lldp_local *p_local;
+ osal_size_t size;
+ u32 *dest;
+
+ p_local = &params->lldp_local;
+
+ size = OSAL_ARRAY_SIZE(p_local->local_chassis_id);
+ dest = p_hwfn->p_dcbx_info->get.lldp_local.local_chassis_id;
+ OSAL_MEMCPY(dest, p_local->local_chassis_id, size);
+
+ size = OSAL_ARRAY_SIZE(p_local->local_port_id);
+ dest = p_hwfn->p_dcbx_info->get.lldp_local.local_port_id;
+ OSAL_MEMCPY(dest, p_local->local_port_id, size);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_lldp_remote *p_remote;
+ osal_size_t size;
+ u32 *dest;
+
+ p_remote = &params->lldp_remote;
+
+ size = OSAL_ARRAY_SIZE(p_remote->peer_chassis_id);
+ dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_chassis_id;
+ OSAL_MEMCPY(dest, p_remote->peer_chassis_id, size);
+
+ size = OSAL_ARRAY_SIZE(p_remote->peer_port_id);
+ dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_port_id;
+ OSAL_MEMCPY(dest, p_remote->peer_port_id, size);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_dcbx_get *p_params;
+
+ p_params = &p_hwfn->p_dcbx_info->get;
+
+ switch (type) {
+ case ECORE_DCBX_REMOTE_MIB:
+ ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ break;
+ case ECORE_DCBX_LOCAL_MIB:
+ ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ break;
+ case ECORE_DCBX_OPERATIONAL_MIB:
+ ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ break;
+ case ECORE_DCBX_REMOTE_LLDP_MIB:
+ rc = ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ case ECORE_DCBX_LOCAL_LLDP_MIB:
+ rc = ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_dcbx_mib_meta_data data;
+
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_config_params);
+ data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+ data.size = sizeof(struct lldp_config_params_s);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_dcbx_mib_meta_data data;
+
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_status_params);
+ data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+ data.size = sizeof(struct lldp_status_params_s);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, operational_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->operational;
+ data.size = sizeof(struct dcbx_mib);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, remote_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->remote;
+ data.size = sizeof(struct dcbx_mib);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.local_admin,
+ data.addr, data.size);
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (type) {
+ case ECORE_DCBX_OPERATIONAL_MIB:
+ rc = ecore_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_REMOTE_MIB:
+ rc = ecore_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_LOCAL_MIB:
+ rc = ecore_dcbx_read_local_mib(p_hwfn, p_ptt);
+ break;
+ case ECORE_DCBX_REMOTE_LLDP_MIB:
+ rc = ecore_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+ break;
+ case ECORE_DCBX_LOCAL_LLDP_MIB:
+ rc = ecore_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+enum _ecore_status_t
+ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ return rc;
+
+ if (type == ECORE_DCBX_OPERATIONAL_MIB) {
+ rc = ecore_dcbx_process_mib_info(p_hwfn);
+ if (!rc) {
+ bool enabled;
+
+ /* reconfigure tcs of QM queues according
+ * to negotiation results
+ */
+ ecore_qm_reconf(p_hwfn, p_ptt);
+
+ /* update storm FW with negotiation results */
+ ecore_sp_pf_update(p_hwfn);
+
+ /* set eagle engine 1 flow control workaround
+ * according to negotiation results
+ */
+ enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
+ ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, enabled);
+ }
+ }
+ ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+ OSAL_DCBX_AEN(p_hwfn, type);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_dcbx_info));
+ if (!p_hwfn->p_dcbx_info) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_dcbx_info'");
+ rc = ECORE_NOMEM;
+ }
+
+ return rc;
+}
+
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_info *p_dcbx_info)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
+}
+
+static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+ struct ecore_dcbx_results *p_src,
+ enum dcbx_protocol_type type)
+{
+ p_data->dcb_enable_flag = p_src->arr[type].enable;
+ p_data->dcb_priority = p_src->arr[type].priority;
+ p_data->dcb_tc = p_src->arr[type].tc;
+}
+
+/* Set pf update ramrod command params */
+void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest)
+{
+ struct protocol_dcb_data *p_dcb_data;
+ bool update_flag;
+
+ p_dest->pf_id = p_src->pf_id;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+ p_dest->update_eth_dcb_data_flag = update_flag;
+
+ p_dcb_data = &p_dest->eth_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+}
+
+static
+enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn,
+ enum ecore_mib_read_type type)
+{
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = ECORE_TIMEOUT;
+ DP_ERR(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ rc = ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *p_get,
+ enum ecore_mib_read_type type)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_dcbx_query(p_hwfn, type);
+ if (rc)
+ return rc;
+
+ if (p_get != OSAL_NULL)
+ OSAL_MEMCPY(p_get, &p_hwfn->p_dcbx_info->get,
+ sizeof(struct ecore_dcbx_get));
+
+ return rc;
+}
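+
+/* Usage sketch (hypothetical caller, for illustration only): query the
+ * operational MIB and inspect the negotiated state.
+ *
+ *	struct ecore_dcbx_get get;
+ *
+ *	if (ecore_dcbx_query_params(p_hwfn, &get,
+ *				    ECORE_DCBX_OPERATIONAL_MIB) ==
+ *	    ECORE_SUCCESS && get.operational.valid)
+ *		do_something(get.operational.params.pfc_enabled);
+ */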
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
new file mode 100644
index 00000000..d577f4e1
--- /dev/null
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DCBX_H__
+#define __ECORE_DCBX_H__
+
+#include "ecore.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_hsi_common.h"
+#include "ecore_dcbx_api.h"
+
+#define ECORE_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
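+
+/* Example (hypothetical field names): with FOO_MASK 0x00000f00 and
+ * FOO_SHIFT 8, ECORE_MFW_GET_FIELD(0x1234, FOO) expands to
+ * ((0x1234 & 0xf00) >> 8) = 0x2.
+ */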
+
+struct ecore_dcbx_info {
+ struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+ struct dcbx_local_params local_admin;
+ struct ecore_dcbx_results results;
+ struct dcbx_mib operational;
+ struct dcbx_mib remote;
+ struct ecore_dcbx_set set;
+ struct ecore_dcbx_get get;
+ u8 dcbx_cap;
+};
+
+/* Upper layer driver interface routines */
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
+ struct ecore_ptt *,
+ struct ecore_dcbx_set *);
+
+/* ECORE local interface routines */
+enum _ecore_status_t
+ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
+ enum ecore_mib_read_type);
+
+enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
+ struct ecore_ptt *);
+enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
+void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest);
+/* @@@TBD eagle phy workaround */
+void ecore_dcbx_eagle_workaround(struct ecore_hwfn *, struct ecore_ptt *,
+ bool set_to_pfc);
+
+#endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
new file mode 100644
index 00000000..7767d48e
--- /dev/null
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DCBX_API_H__
+#define __ECORE_DCBX_API_H__
+
+#include "ecore.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+
+enum ecore_mib_read_type {
+ ECORE_DCBX_OPERATIONAL_MIB,
+ ECORE_DCBX_REMOTE_MIB,
+ ECORE_DCBX_LOCAL_MIB,
+ ECORE_DCBX_REMOTE_LLDP_MIB,
+ ECORE_DCBX_LOCAL_LLDP_MIB
+};
+
+struct ecore_dcbx_app_data {
+ bool enable; /* DCB enabled */
+ bool update; /* Update indication */
+ u8 priority; /* Priority */
+ u8 tc; /* Traffic Class */
+};
+
+#ifndef __EXTRACT__LINUX__
+enum dcbx_protocol_type {
+ DCBX_PROTOCOL_ETH,
+ DCBX_MAX_PROTOCOL_TYPE
+};
+
+#ifdef LINUX_REMOVE
+/* We can't assume the HSI values are available to clients, so we need
+ * to redefine those here.
+ */
+#ifndef LLDP_CHASSIS_ID_STAT_LEN
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#endif
+#ifndef LLDP_PORT_ID_STAT_LEN
+#define LLDP_PORT_ID_STAT_LEN 4
+#endif
+#ifndef DCBX_MAX_APP_PROTOCOL
+#define DCBX_MAX_APP_PROTOCOL 32
+#endif
+
+#endif
+
+struct ecore_dcbx_lldp_remote {
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ bool enable_rx;
+ bool enable_tx;
+ u32 tx_interval;
+ u32 max_credit;
+};
+
+struct ecore_dcbx_lldp_local {
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct ecore_dcbx_app_prio {
+ u8 eth;
+};
+
+struct ecore_dcbx_params {
+ u32 app_bitmap[DCBX_MAX_APP_PROTOCOL];
+ u16 num_app_entries;
+ bool app_willing;
+ bool app_valid;
+ bool ets_willing;
+ bool ets_enabled;
+ bool valid; /* Indicate validity of params */
+ u32 ets_pri_tc_tbl[1];
+ u32 ets_tc_bw_tbl[2];
+ u32 ets_tc_tsa_tbl[2];
+ bool pfc_willing;
+ bool pfc_enabled;
+ u32 pfc_bitmap;
+ u8 max_pfc_tc;
+ u8 max_ets_tc;
+};
+
+struct ecore_dcbx_admin_params {
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+};
+
+struct ecore_dcbx_remote_params {
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+};
+
+struct ecore_dcbx_operational_params {
+ struct ecore_dcbx_app_prio app_prio;
+ struct ecore_dcbx_params params;
+ bool valid; /* Indicate validity of params */
+ bool enabled;
+ bool ieee;
+ bool cee;
+ u32 err;
+};
+
+struct ecore_dcbx_get {
+ struct ecore_dcbx_operational_params operational;
+ struct ecore_dcbx_lldp_remote lldp_remote;
+ struct ecore_dcbx_lldp_local lldp_local;
+ struct ecore_dcbx_remote_params remote;
+ struct ecore_dcbx_admin_params local;
+};
+#endif
+
+struct ecore_dcbx_set {
+ struct ecore_dcbx_admin_params config;
+ bool enabled;
+ u32 ver_num;
+};
+
+struct ecore_dcbx_results {
+ bool dcbx_enabled;
+ u8 pf_id;
+ struct ecore_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct ecore_dcbx_app_metadata {
+ enum dcbx_protocol_type id;
+ const char *name; /* @DPDK */
+ enum ecore_pci_personality personality;
+};
+
+struct ecore_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct dcbx_local_params *local_admin;
+ struct dcbx_mib *mib;
+ osal_size_t size;
+ u32 addr;
+};
+
+void
+ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
+ struct ecore_hw_info *p_info,
+ bool enable, bool update, u8 prio, u8 tc,
+ enum dcbx_protocol_type type,
+ enum ecore_pci_personality personality);
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
+ struct ecore_dcbx_get *,
+ enum ecore_mib_read_type);
+
+static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH}
+};
+
+#endif /* __ECORE_DCBX_API_H__ */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
new file mode 100644
index 00000000..0a689694
--- /dev/null
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -0,0 +1,3597 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore.h"
+#include "ecore_chain.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_int.h"
+#include "ecore_cxt.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dev_api.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_mcp.h"
+#include "ecore_hw_defs.h"
+#include "mcp_public.h"
+#include "ecore_iro.h"
+#include "nvm_cfg.h"
+#include "ecore_dev_api.h"
+#include "ecore_attn_values.h"
+#include "ecore_dcbx.h"
+
+/* Configurable */
+#define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required
+ * to load the driver. The number was
+ * arbitrarily set.
+ */
+
+/* Derived */
+#define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
+
+enum BAR_ID {
+ BAR_ID_0, /* used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
+{
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+
+ /* The above registers were historically updated only in CMT mode.
+ * Since they proved useful, the MFW started updating them from
+ * version 8.7.7.0 onward. In older MFW versions they are set to 0,
+ * which means disabled.
+ */
+ if (!val) {
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR"
+ " size of 256kB for GRC and 512kB for DB\n");
+ return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ }
+
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR"
+ " size of 512kB for GRC and 512kB for DB\n");
+ return 512 * 1024;
+ }
+
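+ /* The register encodes the BAR size as 32kB << val, i.e. val = 1
+ * reads as 64kB and val = 4 as 512kB.
+ */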
+ return 1 << (val + 15);
+}
+
+void ecore_init_dp(struct ecore_dev *p_dev,
+ u32 dp_module, u8 dp_level, void *dp_ctx)
+{
+ u32 i;
+
+ p_dev->dp_level = dp_level;
+ p_dev->dp_module = dp_module;
+ p_dev->dp_ctx = dp_ctx;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ p_hwfn->dp_level = dp_level;
+ p_hwfn->dp_module = dp_module;
+ p_hwfn->dp_ctx = dp_ctx;
+ }
+}
+
+void ecore_init_struct(struct ecore_dev *p_dev)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ p_hwfn->p_dev = p_dev;
+ p_hwfn->my_id = i;
+ p_hwfn->b_active = false;
+
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+ OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
+ }
+
+ /* hwfn 0 is always active */
+ p_dev->hwfns[0].b_active = true;
+
+ /* set the default cache alignment to 128 (may be overridden later) */
+ p_dev->cache_shift = 7;
+}
+
+static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
+ qm_info->qm_pq_params = OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
+ qm_info->qm_vport_params = OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
+ qm_info->qm_port_params = OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
+ qm_info->wfq_data = OSAL_NULL;
+}
+
+void ecore_resc_free(struct ecore_dev *p_dev)
+{
+ int i;
+
+ if (IS_VF(p_dev))
+ return;
+
+ OSAL_FREE(p_dev, p_dev->fw_data);
+ p_dev->fw_data = OSAL_NULL;
+
+ OSAL_FREE(p_dev, p_dev->reset_stats);
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
+ p_hwfn->p_tx_cids = OSAL_NULL;
+ OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
+ p_hwfn->p_rx_cids = OSAL_NULL;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ ecore_cxt_mngr_free(p_hwfn);
+ ecore_qm_info_free(p_hwfn);
+ ecore_spq_free(p_hwfn);
+ ecore_eq_free(p_hwfn, p_hwfn->p_eq);
+ ecore_consq_free(p_hwfn, p_hwfn->p_consq);
+ ecore_int_free(p_hwfn);
+ ecore_iov_free(p_hwfn);
+ ecore_dmae_info_free(p_hwfn);
+ ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+ /* @@@TBD Flush work-queue ? */
+ }
+}
+
+static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
+ bool b_sleepable)
+{
+ u8 num_vports, vf_offset = 0, i, vport_id, num_ports;
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_port_params *p_qm_port;
+ u16 num_pqs, multi_cos_tcs = 1;
+#ifdef CONFIG_ECORE_SRIOV
+ u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+#else
+ u16 num_vfs = 0;
+#endif
+
+ OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
+
+#ifndef ASIC_ONLY
+ /* @TMP - Don't allocate QM queues for VFs on emulation */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "Emulation - skip configuring QM queues for VFs\n");
+ num_vfs = 0;
+ }
+#endif
+
+ num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
+ num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);
+
+ /* Sanity checking that setup requires legal number of resources */
+ if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
+ DP_ERR(p_hwfn,
+ "Need too many Physical queues - 0x%04x when"
+ " only %04x are available\n",
+ num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
+ return ECORE_INVAL;
+ }
+
+ /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
+ * then special queues, then per-VF PQ.
+ */
+ qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
+ b_sleepable ? GFP_KERNEL :
+ GFP_ATOMIC,
+ sizeof(struct init_qm_pq_params) *
+ num_pqs);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
+ b_sleepable ? GFP_KERNEL :
+ GFP_ATOMIC,
+ sizeof(struct
+ init_qm_vport_params) *
+ num_vports);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
+ b_sleepable ? GFP_KERNEL :
+ GFP_ATOMIC,
+ sizeof(struct init_qm_port_params)
+ * MAX_NUM_PORTS);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
+ b_sleepable ? GFP_KERNEL :
+ GFP_ATOMIC,
+ sizeof(struct ecore_wfq_data) *
+ num_vports);
+
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+
+ /* First init per-TC PQs */
+ for (i = 0; i < multi_cos_tcs; i++) {
+ struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+ if (p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
+ } else {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.offload_tc;
+ params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
+ }
+ }
+
+ /* Then init pure-LB PQ */
+ qm_info->pure_lb_pq = i;
+ qm_info->qm_pq_params[i].vport_id =
+ (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[i].wrr_group = 1;
+ i++;
+
+ /* Then init per-VF PQs */
+ vf_offset = i;
+ for (i = 0; i < num_vfs; i++) {
+ /* First vport is used by the PF */
+ qm_info->qm_pq_params[vf_offset + i].vport_id = vport_id +
+ i + 1;
+ qm_info->qm_pq_params[vf_offset + i].tc_id =
+ p_hwfn->hw_info.non_offload_tc;
+ qm_info->qm_pq_params[vf_offset + i].wrr_group = 1;
+ }
+
+ qm_info->vf_queues_offset = vf_offset;
+ qm_info->num_pqs = num_pqs;
+ qm_info->num_vports = num_vports;
+
+ /* Initialize qm port parameters */
+ num_ports = p_hwfn->p_dev->num_ports_in_engines;
+ for (i = 0; i < num_ports; i++) {
+ p_qm_port = &qm_info->qm_port_params[i];
+ p_qm_port->active = 1;
+ /* @@@TMP - was NUM_OF_PHYS_TCS; Changed until dcbx will
+ * be in place
+ */
+ if (num_ports == 4)
+ p_qm_port->num_active_phys_tcs = 2;
+ else
+ p_qm_port->num_active_phys_tcs = 5;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+ }
+
+ if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
+ qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
+ else
+ qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+
+ qm_info->num_vf_pqs = num_vfs;
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+
+ for (i = 0; i < qm_info->num_vports; i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+
+ qm_info->pf_wfq = 0;
+ qm_info->pf_rl = 0;
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ return ECORE_SUCCESS;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+ ecore_qm_info_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through rbc interface to release the QM
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ enum _ecore_status_t rc;
+ bool b_rc;
+
+ /* qm_info is allocated in ecore_init_qm_info() which is already called
+ * from ecore_resc_alloc() or a previous call of ecore_qm_reconf().
+ * The allocated size may change each init, so we free it before the
+ * next allocation.
+ */
+ ecore_qm_info_free(p_hwfn);
+
+ /* initialize ecore's qm data structure */
+ rc = ecore_init_qm_info(p_hwfn, false);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* stop PF's qm queues */
+ b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ if (!b_rc)
+ return ECORE_INVAL;
+
+ /* clear the QM_PF runtime phase leftovers from previous init */
+ ecore_init_clear_rt_data(p_hwfn);
+
+ /* prepare QM portion of runtime array */
+ ecore_qm_init_pf(p_hwfn);
+
+ /* activate init tool on runtime array */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+ p_hwfn->hw_info.hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* start PF's qm queues */
+ b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ if (!b_rc)
+ return ECORE_INVAL;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_consq *p_consq;
+ struct ecore_eq *p_eq;
+ int i;
+
+ if (IS_VF(p_dev))
+ return rc;
+
+ p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(struct ecore_fw_data));
+ if (!p_dev->fw_data)
+ return ECORE_NOMEM;
+
+ /* Allocate Memory for the Queue->CID mapping */
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ /* @@@TMP - resc management, change to actual required size */
+ int tx_size = sizeof(struct ecore_hw_cid_data) *
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ int rx_size = sizeof(struct ecore_hw_cid_data) *
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+
+ p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ tx_size);
+ if (!p_hwfn->p_tx_cids) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate memory for Tx Cids\n");
+ goto alloc_no_mem;
+ }
+
+ p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ rx_size);
+ if (!p_hwfn->p_rx_cids) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate memory for Rx Cids\n");
+ goto alloc_no_mem;
+ }
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ /* First allocate the context manager structure */
+ rc = ecore_cxt_mngr_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Set the HW cid/tid numbers (in the context manager)
+ * Must be done prior to any further computations.
+ */
+ rc = ecore_cxt_set_pf_params(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Prepare and process QM requirements */
+ rc = ecore_init_qm_info(p_hwfn, true);
+ if (rc)
+ goto alloc_err;
+
+ /* Compute the ILT client partition */
+ rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* CID map / ILT shadow table / T2
+ * The table sizes are determined by the computations above
+ */
+ rc = ecore_cxt_tables_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SPQ, must follow ILT because it initializes the SPQ context */
+ rc = ecore_spq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SP status block allocation */
+ p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+
+ rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* EQ */
+ p_eq = ecore_eq_alloc(p_hwfn, 256);
+ if (!p_eq)
+ goto alloc_no_mem;
+ p_hwfn->p_eq = p_eq;
+
+ p_consq = ecore_consq_alloc(p_hwfn);
+ if (!p_consq)
+ goto alloc_no_mem;
+ p_hwfn->p_consq = p_consq;
+
+ /* DMA info initialization */
+ rc = ecore_dmae_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate memory for"
+ " dmae_info structure\n");
+ goto alloc_err;
+ }
+
+ /* DCBX initialization */
+ rc = ecore_dcbx_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate memory for dcbxstruct\n");
+ goto alloc_err;
+ }
+ }
+
+ p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(struct ecore_eth_stats));
+ if (!p_dev->reset_stats) {
+ DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
+ goto alloc_no_mem;
+ }
+
+ return ECORE_SUCCESS;
+
+alloc_no_mem:
+ rc = ECORE_NOMEM;
+alloc_err:
+ ecore_resc_free(p_dev);
+ return rc;
+}
+
+void ecore_resc_setup(struct ecore_dev *p_dev)
+{
+ int i;
+
+ if (IS_VF(p_dev))
+ return;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ ecore_cxt_mngr_setup(p_hwfn);
+ ecore_spq_setup(p_hwfn);
+ ecore_eq_setup(p_hwfn, p_hwfn->p_eq);
+ ecore_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+ /* Read shadow of current MFW mailbox */
+ ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+ OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+
+ ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+ ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+ }
+}
+
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME (10)
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 id, bool is_vf)
+{
+ u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ enum _ecore_status_t rc = ECORE_TIMEOUT;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
+ CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+
+ if (is_vf)
+ id += 0x10;
+
+ command |= X_FINAL_CLEANUP_AGG_INT <<
+ SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+ command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+ command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+ command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+ /* Make sure notification is not set before initiating final cleanup */
+ if (REG_RD(p_hwfn, addr)) {
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected; Found final cleanup notification "
+ "before initiating final cleanup\n");
+ REG_WR(p_hwfn, addr, 0);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ id, OSAL_CPU_TO_LE32(command));
+
+ ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN,
+ OSAL_CPU_TO_LE32(command));
+
+ /* Poll until completion */
+ while (!REG_RD(p_hwfn, addr) && count--)
+ OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
+
+ if (REG_RD(p_hwfn, addr))
+ rc = ECORE_SUCCESS;
+ else
+ DP_NOTICE(p_hwfn, true,
+ "Failed to receive FW final cleanup notification\n");
+
+ /* Cleanup afterwards */
+ REG_WR(p_hwfn, addr, 0);
+
+ return rc;
+}
+
+static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
+{
+ int hw_mode = 0;
+
+ switch (ECORE_GET_TYPE(p_hwfn->p_dev)) {
+ case CHIP_BB_A0:
+ hw_mode |= 1 << MODE_BB_A0;
+ break;
+ case CHIP_BB_B0:
+ hw_mode |= 1 << MODE_BB_B0;
+ break;
+ case CHIP_K2:
+ hw_mode |= 1 << MODE_K2;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Can't initialize chip ID %d\n",
+ ECORE_GET_TYPE(p_hwfn->p_dev));
+ return;
+ }
+
+ /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
+ switch (p_hwfn->p_dev->num_ports_in_engines) {
+ case 1:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+ break;
+ case 2:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+ break;
+ case 4:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "num_ports_in_engine = %d not supported\n",
+ p_hwfn->p_dev->num_ports_in_engines);
+ return;
+ }
+
+ switch (p_hwfn->p_dev->mf_mode) {
+ case ECORE_MF_DEFAULT:
+ case ECORE_MF_NPAR:
+ hw_mode |= 1 << MODE_MF_SI;
+ break;
+ case ECORE_MF_OVLAN:
+ hw_mode |= 1 << MODE_MF_SD;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Unsupported MF mode, init as DEFAULT\n");
+ hw_mode |= 1 << MODE_MF_SI;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_FPGA;
+ } else {
+ if (p_hwfn->p_dev->b_is_emul_full)
+ hw_mode |= 1 << MODE_EMUL_FULL;
+ else
+ hw_mode |= 1 << MODE_EMUL_REDUCED;
+ }
+ } else
+#endif
+ hw_mode |= 1 << MODE_ASIC;
+
+ if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
+ hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
+
+ if (p_hwfn->p_dev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
+ p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
+}
+
+#ifndef ASIC_ONLY
+/* MFW-replacement initializations for non-ASIC */
+static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 pl_hv = 1;
+ int i;
+
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
+ pl_hv |= 0x600;
+
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
+
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);
+
+ /* initialize interrupt masks */
+ for (i = 0;
+ i <
+ attn_blocks[BLOCK_MISCS].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+ num_of_int_regs; i++)
+ ecore_wr(p_hwfn, p_ptt,
+ attn_blocks[BLOCK_MISCS].
+ chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[i]->
+ mask_addr, 0);
+
+ if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_wr(p_hwfn, p_ptt,
+ attn_blocks[BLOCK_CNIG].
+ chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
+ mask_addr, 0);
+ ecore_wr(p_hwfn, p_ptt,
+ attn_blocks[BLOCK_PGLCS].
+ chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
+ mask_addr, 0);
+ ecore_wr(p_hwfn, p_ptt,
+ attn_blocks[BLOCK_CPMU].
+ chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
+ mask_addr, 0);
+ /* Currently A0 and B0 interrupt bits are the same in pglue_b;
+ * If this changes, need to set this according to chip type. <14/09/23>
+ */
+ ecore_wr(p_hwfn, p_ptt,
+ attn_blocks[BLOCK_PGLUE_B].
+ chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
+ mask_addr, 0x80000);
+
+ /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
+ /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
+ if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4);
+
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
+ /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+ (p_hwfn->p_dev->num_ports_in_engines >> 1));
+
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+ p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ }
+
+ /* Poll on RBC */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
+ for (i = 0; i < 100; i++) {
+ OSAL_UDELAY(50);
+ if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
+ break;
+ }
+ if (i == 100)
+ DP_NOTICE(p_hwfn, true,
+ "RBC done failed to complete in PSWRQ2\n");
+}
+#endif
+
+/* Init run time data for all PFs and their VFs on an engine.
+ * TBD - for VFs - once parent PF info for each VF is available in
+ * shmem, as CAU requires knowledge of the parent PF for each VF.
+ */
+static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
+{
+ u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+ int i, sb_id;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_igu_info *p_igu_info;
+ struct ecore_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
+ sb_id++) {
+ p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+ if (!p_block->is_pf)
+ continue;
+
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_block->function_id, 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
+ }
+ }
+}
+
+static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 vf_id, max_num_vfs;
+ u16 num_pfs, pf_id;
+ u32 concrete_fid;
+
+ ecore_init_cau_rt_data(p_dev);
+
+ /* Program GTT windows */
+ ecore_gtt_init(p_hwfn);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+#endif
+
+ if (p_hwfn->mcp_info) {
+ if (p_hwfn->mcp_info->func_info.bandwidth_max)
+ qm_info->pf_rl_en = 1;
+ if (p_hwfn->mcp_info->func_info.bandwidth_min)
+ qm_info->pf_wfq_en = 1;
+ }
+
+ ecore_qm_common_rt_init(p_hwfn,
+ p_hwfn->p_dev->num_ports_in_engines,
+ qm_info->max_phys_tcs_per_port,
+ qm_info->pf_rl_en, qm_info->pf_wfq_en,
+ qm_info->vport_rl_en, qm_info->vport_wfq_en,
+ qm_info->qm_port_params);
+
+ ecore_cxt_hw_init_common(p_hwfn);
+
+ /* Close gate from NIG to BRB/Storm; By default they are open, but
+ * we close them to prevent NIG from passing data to reset blocks.
+ * Should have been done in the ENGINE phase, but init-tool lacks
+ * proper port-pretend capabilities.
+ */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+ ecore_port_unpretend(p_hwfn, p_ptt);
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* @@TBD MichalK - should add VALIDATE_VFID to init tool...
+ * need to decide with which value, maybe runtime
+ */
+ ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+ if (num_pfs == 1)
+ return rc;
+ /* pretend to original PF */
+ ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+
+ /* Workaround for avoiding CCFC execution error when getting packets
+ * with CRC errors, and allowing instead the invoking of the FW error
+ * handler.
+ * This is not done inside the init tool since it currently can't
+ * perform a pretending to VFs.
+ */
+ max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
+ : MAX_NUM_VFS_BB;
+ for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
+ concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
+ ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ }
+ /* pretend to original PF */
+ ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
+ return rc;
+}
+
+#ifndef ASIC_ONLY
+#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4)
+#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5)
+
+#define PMEG_IF_BYTE_COUNT 8
+
+static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u64 data, u8 reg_type, u8 port)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
+ ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) |
+ (8 << PMEG_IF_BYTE_COUNT),
+ (reg_type << 25) | (addr << 8) | port,
+ (u32)((data >> 32) & 0xffffffff),
+ (u32)(data & 0xffffffff));
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0,
+ (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) &
+ 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0,
+ (reg_type << 25) | (addr << 8) | port);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+ data & 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+ (data >> 32) & 0xffffffff);
+}
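+
+/* Illustrative sketch (not part of this patch): the indirect PMEG write
+ * above programs a command word, an address word built from reg_type,
+ * addr and port, and the 64-bit payload as two 32-bit halves. A minimal
+ * standalone model of that split, using plain stdint types:
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint64_t data = 0x30ffffc000ULL;	/* e.g. the PFC_CTRL value */
+	uint8_t reg_type = 0, port = 2;
+	uint32_t addr = 0x60e;			/* e.g. XLMAC_PFC_CTRL */
+
+	uint32_t addr_word = (reg_type << 25) | (addr << 8) | port;
+	uint32_t lo = (uint32_t)(data & 0xffffffff);
+	uint32_t hi = (uint32_t)((data >> 32) & 0xffffffff);
+
+	printf("ADDR: 0x%08x, DATA: %08x:%08x\n", addr_word, hi, lo);
+	return 0;
+}
+#endif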
+
+#define XLPORT_MODE_REG (0x20a)
+#define XLPORT_MAC_CONTROL (0x210)
+#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
+#define XLPORT_ENABLE_REG (0x20b)
+
+#define XLMAC_CTRL (0x600)
+#define XLMAC_MODE (0x601)
+#define XLMAC_RX_MAX_SIZE (0x608)
+#define XLMAC_TX_CTRL (0x604)
+#define XLMAC_PAUSE_CTRL (0x60d)
+#define XLMAC_PFC_CTRL (0x60e)
+
+static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 port = p_hwfn->port_id;
+ u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE;
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
+ (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) |
+ (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT)
+ | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE,
+ 1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH,
+ 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH,
+ 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS,
+ 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS,
+ (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) |
+ (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853);
+}
+
+static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 loopback = 0, port = p_hwfn->port_id * 2;
+
+	DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
+
+ if (ECORE_IS_AH(p_hwfn->p_dev)) {
+ ecore_emul_link_init_ah(p_hwfn, p_ptt);
+ return;
+ }
+
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
+ port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
+ 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
+ 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
+ 0x30ffffc000ULL, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
+ port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
+ 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
+}
+
+static void ecore_link_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port)
+{
+ int port_offset = port ? 0x800 : 0;
+ u32 xmac_rxctrl = 0;
+
+ /* Reset of XMAC */
+ /* FIXME: move to common start */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
+ OSAL_MSLEEP(1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
+
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+
+ /* Soft reset of XMAC */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+ OSAL_MSLEEP(1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+ MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+
+ /* FIXME: move to common end */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20);
+
+	/* Set Max packet size: initialize XMAC block register for the port */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710);
+
+	/* CRC append for Tx packets: init XMAC block register for the port */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800);
+
+	/* Enable TX and RX: initialize XMAC block register for the port */
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset,
+ XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN);
+ xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset);
+ xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE;
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl);
+}
+#endif
+
+static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Init sequence */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+ ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id);
+ } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ /* Activate OPTE in CMT */
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
+ val |= 0x10;
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
+ 0x55555555);
+ }
+
+ ecore_emul_link_init(p_hwfn, p_ptt);
+ } else {
+ DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
+ }
+#endif
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 pwm_regsize, norm_regsize;
+ u32 non_pwm_conn, min_addr_reg1;
+ u32 db_bar_size, n_cpus;
+ u32 pf_dems_shift;
+ int rc = ECORE_SUCCESS;
+
+ db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
+ if (p_hwfn->p_dev->num_hwfns > 1)
+ db_bar_size /= 2;
+
+ /* Calculate doorbell regions
+ * -----------------------------------
+ * The doorbell BAR is made of two regions. The first is called normal
+ * region and the second is called PWM region. In the normal region
+ * each ICID has its own set of addresses so that writing to that
+ * specific address identifies the ICID. In the Process Window Mode
+ * region the ICID is given in the data written to the doorbell. The
+ * above per PF register denotes the offset in the doorbell BAR in which
+ * the PWM region begins.
+ * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
+ * non-PWM connection. The calculation below computes the total non-PWM
+ * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
+ * in units of 4,096 bytes.
+ */
+ non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ OSAL_NULL) +
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
+ norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ min_addr_reg1 = norm_regsize / 4096;
+ pwm_regsize = db_bar_size - norm_regsize;
+
+ /* Check that the normal and PWM sizes are valid */
+ if (db_bar_size < norm_regsize) {
+ DP_ERR(p_hwfn->p_dev,
+		       "Doorbell BAR size 0x%x is too small"
+		       " (normal region is 0x%x)\n",
+ db_bar_size, norm_regsize);
+ return ECORE_NORESOURCES;
+ }
+ if (pwm_regsize < ECORE_MIN_PWM_REGION) {
+ DP_ERR(p_hwfn->p_dev,
+		       "PWM region size 0x%x is too small."
+		       " Should be at least 0x%x (Doorbell BAR size"
+		       " is 0x%x and normal region size is 0x%x)\n",
+ pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
+ norm_regsize);
+ return ECORE_NORESOURCES;
+ }
+
+ /* Update hwfn */
+ p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to
+ * calculate the doorbell
+ * address
+ */
+
+ /* Update registers */
+ /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+ pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+ DP_INFO(p_hwfn,
+ "Doorbell size 0x%x, Normal region 0x%x, PWM region 0x%x\n",
+ db_bar_size, norm_regsize, pwm_regsize);
+ DP_INFO(p_hwfn, "DPI size 0x%x, DPI count 0x%x\n", p_hwfn->dpi_size,
+ p_hwfn->dpi_count);
+
+ return ECORE_SUCCESS;
+}
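+
+/* Worked example (illustrative only, with made-up numbers) of the
+ * normal/PWM split computed above: norm_regsize is the per-ICID region
+ * rounded up to 4KB, min_addr_reg1 is that size in 4KB units, and the
+ * PWM region takes whatever remains of the BAR.
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+#include <stdio.h>
+
+#define EX_DEMS_SIZE	4	/* stand-in for ECORE_PF_DEMS_SIZE */
+#define EX_ROUNDUP(x, n)	((((x) + (n) - 1) / (n)) * (n))
+
+int main(void)
+{
+	uint32_t db_bar_size = 0x80000;	/* hypothetical 512KB half-BAR */
+	uint32_t non_pwm_conn = 640;	/* hypothetical CID count */
+
+	uint32_t norm = EX_ROUNDUP(EX_DEMS_SIZE * non_pwm_conn, 4096);
+	uint32_t min_addr_reg1 = norm / 4096;	/* units of 4096 bytes */
+	uint32_t pwm = db_bar_size - norm;
+
+	/* prints: norm 0x1000 reg1 1 pwm 0x7f000 */
+	printf("norm 0x%x reg1 %u pwm 0x%x\n", norm, min_addr_reg1, pwm);
+	return 0;
+}
+#endif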
+
+static enum _ecore_status_t
+ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_tunn_start_params *p_tunn,
+ int hw_mode,
+ bool b_hw_start,
+ enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 rel_pf_id = p_hwfn->rel_pf_id;
+ u32 prs_reg;
+ u16 ctrl;
+ int pos;
+
+ /* ILT/DQ/CM/QM */
+ if (p_hwfn->mcp_info) {
+ struct ecore_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+ if (p_info->bandwidth_min)
+ p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+		/* Update the rate limit once we actually have a link */
+ p_hwfn->qm_info.pf_rl = 100;
+ }
+ ecore_cxt_hw_init_pf(p_hwfn);
+
+ ecore_int_igu_init_rt(p_hwfn); /* @@@TBD TODO MichalS multi hwfn ?? */
+
+ /* Set VLAN in NIG if needed */
+ if (hw_mode & (1 << MODE_MF_SD)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+ p_hwfn->hw_info.ovlan);
+ }
+
+ /* Enable classification by MAC if needed */
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET,
+ 1);
+ }
+
+	/* Protocol Configuration - @@@TBD - should we set 0 otherwise? */
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+
+ /* perform debug configuration when chip is out of reset */
+ OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
+
+	/* Clean up the chip from a previous driver if such remains exist */
+ rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
+ if (rc != ECORE_SUCCESS) {
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+ return rc;
+ }
+
+ /* PF Init sequence */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* Pure runtime initializations - directly to the HW */
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+	/* PCI relaxed ordering causes a performance decrease on some
+	 * systems. Until a root cause is found, disable this attribute in the
+ * PCI config space.
+ */
+ /* Not in use @DPDK
+ * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
+ * if (!pos) {
+ * DP_NOTICE(p_hwfn, true,
+ * "Failed to find the PCI Express"
+ * " Capability structure in the PCI config space\n");
+ * return ECORE_IO;
+ * }
+ * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
+ * &ctrl);
+ * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
+ * &ctrl);
+ */
+
+#ifndef ASIC_ONLY
+	/* @@TMP - On B0 build 1, need to mask the datapath_registers parity */
+ if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev) &&
+ (p_hwfn->p_dev->chip_metal == 1)) {
+ u32 reg_addr, tmp;
+
+ reg_addr =
+ attn_blocks[BLOCK_PGLUE_B].
+ chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].prty_regs[0]->
+ mask_addr;
+ DP_NOTICE(p_hwfn, false,
+ "Masking datapath registers parity on"
+ " B0 emulation [build 1]\n");
+ tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
+ tmp |= (1 << 0); /* Was PRTY_MASK_DATAPATH_REGISTERS */
+ ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);
+ }
+#endif
+
+ rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ if (b_hw_start) {
+ /* enable interrupts */
+ ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+ /* send function start command */
+ rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
+ allow_npar_tx_switch);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Function start ramrod failed\n");
+ } else {
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH register after start PFn\n");
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_SEARCH_TCP_FIRST_FRAG);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
+ prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ }
+ }
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 enable)
+{
+ u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+ /* Change PF in PXP */
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+ /* wait until value is set - try for 1 second every 50us */
+ for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+ val = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ if (val == set_val)
+ break;
+
+ OSAL_UDELAY(50);
+ }
+
+ if (val != set_val) {
+ DP_NOTICE(p_hwfn, true,
+ "PFID_ENABLE_MASTER wasn't changed after a second\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return ECORE_SUCCESS;
+}
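+
+/* The write-then-poll shape above (set a register, poll the read-back
+ * value against the expected one under a bounded retry budget) recurs
+ * throughout this file. A minimal standalone sketch of the pattern; the
+ * read_reg() stub is hypothetical:
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+#include <stdbool.h>
+
+static uint32_t read_reg(void) { return 1; }	/* hypothetical stub */
+
+static bool poll_for_value(uint32_t expected, int retries)
+{
+	while (retries--) {
+		if (read_reg() == expected)
+			return true;
+		/* a udelay(50) equivalent would go here */
+	}
+	return false;	/* caller logs a notice, as above */
+}
+#endif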
+
+static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_main_ptt)
+{
+ /* Read shadow of current MFW mailbox */
+ ecore_mcp_read_mb(p_hwfn, p_main_ptt);
+ OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+}
+
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+ struct ecore_tunn_start_params *p_tunn,
+ bool b_hw_start,
+ enum ecore_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data)
+{
+ enum _ecore_status_t rc, mfw_rc;
+ u32 load_code, param;
+ int i, j;
+
+ if (IS_PF(p_dev)) {
+ rc = ecore_init_fw_data(p_dev, bin_fw_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (IS_VF(p_dev)) {
+ rc = ecore_vf_pf_init(p_hwfn);
+ if (rc)
+ return rc;
+ continue;
+ }
+
+ /* Enable DMAE in PXP */
+ rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+
+ ecore_calc_hw_mode(p_hwfn);
+ /* @@@TBD need to add here:
+ * Check for fan failure
+ * Prev_unload
+ */
+ rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending LOAD_REQ command\n");
+ return rc;
+ }
+
+ /* CQ75580:
+		 * When coming back from hibernate state, the registers from
+		 * which the shadow is read initially are not initialized. It
+		 * turns out that these registers get initialized during the
+		 * call to ecore_mcp_load_req, so we need to reread them here
+		 * to get the proper shadow register value.
+		 * Note: This is a workaround for the missing MFW
+ * initialization. It may be removed once the implementation
+ * is done.
+ */
+ ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "Load request was sent. Resp: 0x%x, Load code: 0x%x\n",
+ rc, load_code);
+
+ /* Only relevant for recovery:
+ * Clear the indication after the LOAD_REQ command is responded
+ * by the MFW.
+ */
+ p_dev->recov_in_prog = false;
+
+ p_hwfn->first_on_engine = (load_code ==
+ FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+			/* Fall through */
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+
+ if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
+ struct init_nig_pri_tc_map_req tc_map;
+
+ OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
+
+ /* remove this once flow control is
+ * implemented
+ */
+ for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
+ tc_map.pri[j].tc_id = 0;
+ tc_map.pri[j].valid = 1;
+ }
+ ecore_init_nig_pri_tc_map(p_hwfn,
+ p_hwfn->p_main_ptt,
+ &tc_map);
+ }
+ /* fallthrough */
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn, p_hwfn->hw_info.hw_mode,
+ b_hw_start, int_mode,
+ allow_npar_tx_switch);
+ break;
+ default:
+ rc = ECORE_NOTIMPL;
+ break;
+ }
+
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, true,
+ "init phase failed loadcode 0x%x (rc %d)\n",
+ load_code, rc);
+
+ /* ACK mfw regardless of success or failure of initialization */
+ mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_LOAD_DONE,
+ 0, &load_code, &param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ if (mfw_rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending LOAD_DONE command\n");
+ return mfw_rc;
+ }
+
+ /* send DCBX attention request command */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "sending phony dcbx set command to trigger DCBx"
+ " attention handling\n");
+ mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &load_code, &param);
+ if (mfw_rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to send DCBX attention request\n");
+ return mfw_rc;
+ }
+
+ p_hwfn->hw_init_done = true;
+ }
+
+ return ECORE_SUCCESS;
+}
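+
+/* The load_code switch above is a deliberate fall-through ladder: an
+ * ENGINE load runs the engine, port and PF phases; a PORT load runs the
+ * port and PF phases; a FUNCTION load runs only the PF phase. A compact
+ * standalone sketch of the same control flow (all names illustrative):
+ */
+#if 0	/* example only - never compiled */
+enum ex_load_code { EX_LOAD_ENGINE, EX_LOAD_PORT, EX_LOAD_FUNCTION };
+
+static int ex_init_engine(void) { return 0; }
+static int ex_init_port(void) { return 0; }
+static int ex_init_pf(void) { return 0; }
+
+static int ex_init_phases(enum ex_load_code code)
+{
+	int rc = 0;
+
+	switch (code) {
+	case EX_LOAD_ENGINE:
+		rc = ex_init_engine();
+		if (rc)
+			break;
+		/* fall through */
+	case EX_LOAD_PORT:
+		rc = ex_init_port();
+		if (rc)
+			break;
+		/* fall through */
+	case EX_LOAD_FUNCTION:
+		rc = ex_init_pf();
+		break;
+	}
+	return rc;
+}
+#endif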
+
+#define ECORE_HW_STOP_RETRY_LIMIT (10)
+static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ int i;
+
+ /* close timers */
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+ for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT &&
+ !p_dev->recov_in_prog; i++) {
+ if ((!ecore_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ /* Dependent on number of connection/tasks, possibly
+ * 1ms sleep is required between polls
+ */
+ OSAL_MSLEEP(1);
+ }
+ if (i == ECORE_HW_STOP_RETRY_LIMIT)
+ DP_NOTICE(p_hwfn, true,
+ "Timers linear scans are not over"
+ " [Connection %02x Tasks %02x]\n",
+ (u8)ecore_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)ecore_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
+{
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
+ }
+}
+
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS, t_rc;
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+
+ /* mark the hw as uninitialized... */
+ p_hwfn->hw_init_done = false;
+
+ rc = ecore_sp_pf_stop(p_hwfn);
+ if (rc)
+ DP_NOTICE(p_hwfn, true,
+ "Failed to close PF against FW. Continue to"
+ " stop HW to prevent illegal host access"
+ " by the device\n");
+
+ /* perform debug action after PF stop was sent */
+ OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
+
+ /* close NIG to BRB gate */
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ /* close parser */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ /* @@@TBD - clean transmission queues (5.b) */
+ /* @@@TBD - clean BTB (5.c) */
+
+ ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
+
+ /* @@@TBD - verify DMAE requests are done (8) */
+
+ /* Disable Attention Generation */
+ ecore_int_igu_disable_int(p_hwfn, p_ptt);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ OSAL_MSLEEP(1);
+ }
+
+ if (IS_PF(p_dev)) {
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],
+ p_dev->hwfns[0].p_main_ptt, false);
+ if (t_rc != ECORE_SUCCESS)
+ rc = t_rc;
+ }
+
+ return rc;
+}
+
+void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+{
+ int j;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+ "Shutting down the fastpath\n");
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ /* @@@TBD - clean transmission queues (5.b) */
+ /* @@@TBD - clean BTB (5.c) */
+
+ /* @@@TBD - verify DMAE requests are done (8) */
+
+ ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ OSAL_MSLEEP(1);
+ }
+}
+
+void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return;
+
+ /* Re-open incoming traffic */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 reg,
+ bool expected)
+{
+ u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg);
+
+ if (assert_val != expected) {
+ DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n",
+ reg, expected);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return 0;
+}
+
+enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 unload_resp, unload_param;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (IS_VF(p_dev)) {
+ rc = ecore_vf_pf_reset(p_hwfn);
+ if (rc)
+ return rc;
+ continue;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
+
+ /* Check for incorrect states */
+ if (!p_dev->recov_in_prog) {
+ ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+ /* @@@TBD - assert on incorrect xCFC values (10.b) */
+ }
+
+ /* Disable PF in HW blocks */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ TCFC_REG_STRONG_ENABLE_PF, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ CCFC_REG_STRONG_ENABLE_PF, 0);
+
+ if (p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+ "Recovery is in progress -> skip "
+ "sending unload_req/done\n");
+ break;
+ }
+
+ /* Send unload command to MCP */
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_REQ,
+ DRV_MB_PARAM_UNLOAD_WOL_MCP,
+ &unload_resp, &unload_param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_hw_reset: UNLOAD_REQ failed\n");
+ /* @@TBD - what to do? for now, assume ENG. */
+ unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_UNLOAD_DONE,
+ 0, &unload_resp, &unload_param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn,
+ true, "ecore_hw_reset: UNLOAD_DONE failed\n");
+ /* @@@TBD - Should it really ASSERT here ? */
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
+{
+ ecore_ptt_pool_free(p_hwfn);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
+{
+ /* clear indirect access */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+ /* Clean Previous errors if such exist */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+
+ /* enable internal target-read */
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+}
+
+static void get_function_id(struct ecore_hwfn *p_hwfn)
+{
+ /* ME Register */
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+ /* Bits 16-19 from the ME registers are the pf_num */
+	/* @@@TBD - check, may be wrong after B0 implementation for CMT */
+ p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+ p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID);
+ p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PORT);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+ p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
+}
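+
+/* The IDs above are packed bitfields of the concrete FID. A standalone
+ * mask-and-shift sketch of the same extraction; the field positions
+ * below are illustrative, not the real PXP layout:
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+
+#define EX_FID_PFID_MASK	0xf
+#define EX_FID_PFID_SHIFT	0
+#define EX_FID_PORT_MASK	0x3
+#define EX_FID_PORT_SHIFT	4
+
+#define EX_GET_FIELD(val, name) \
+	(((val) >> name##_SHIFT) & name##_MASK)
+
+static void ex_split_fid(uint32_t concrete_fid,
+			 uint8_t *pfid, uint8_t *port)
+{
+	*pfid = EX_GET_FIELD(concrete_fid, EX_FID_PFID);
+	*port = EX_GET_FIELD(concrete_fid, EX_FID_PORT);
+}
+#endif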
+
+static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
+{
+ u32 *feat_num = p_hwfn->hw_info.feat_num;
+ int num_features = 1;
+
+ /* L2 Queues require each: 1 status block. 1 L2 queue */
+ feat_num[ECORE_PF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_SB) / num_features,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+ feat_num[ECORE_PF_L2_QUE],
+ RESC_NUM(p_hwfn, ECORE_SB), num_features);
+}
+
+/* @@@TBD MK RESC: This info is currently hard coded and set as if we were MF;
+ * need to read it from shmem...
+ */
+static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
+{
+ u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u8 num_funcs = p_hwfn->num_funcs_on_engine;
+ u32 *resc_num = p_hwfn->hw_info.resc_num;
+ int i, max_vf_vlan_filters;
+ struct ecore_sb_cnt_info sb_cnt_info;
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+
+ OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+
+#ifdef CONFIG_ECORE_SRIOV
+ max_vf_vlan_filters = ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS;
+#else
+ max_vf_vlan_filters = 0;
+#endif
+
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ resc_num[ECORE_SB] = OSAL_MIN_T(u32,
+ (MAX_SB_PER_PATH_BB / num_funcs),
+ sb_cnt_info.sb_cnt);
+
+ resc_num[ECORE_L2_QUEUE] = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ resc_num[ECORE_VPORT] = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
+ resc_num[ECORE_RSS_ENG] = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ resc_num[ECORE_PQ] = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ MAX_QM_TX_QUEUES_BB) / num_funcs;
+ resc_num[ECORE_RL] = 8;
+ resc_num[ECORE_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+ resc_num[ECORE_VLAN] = (ETH_NUM_VLAN_FILTERS -
+ max_vf_vlan_filters +
+ 1 /*For vlan0 */) / num_funcs;
+
+ /* TODO - there will be a problem in AH - there are only 11k lines */
+ resc_num[ECORE_ILT] = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ /* Reduced build contains less PQs */
+ if (!(p_hwfn->p_dev->b_is_emul_full))
+ resc_num[ECORE_PQ] = 32;
+
+ /* For AH emulation, since we have a possible maximal number of
+ * 16 enabled PFs, in case there are not enough ILT lines -
+ * allocate only first PF as RoCE and have all the other ETH
+ * only with less ILT lines.
+ */
+ if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
+ resc_num[ECORE_ILT] = resc_num[ECORE_ILT];
+ }
+#endif
+
+ for (i = 0; i < ECORE_MAX_RESC; i++)
+ resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+#ifndef ASIC_ONLY
+ /* Correct the common ILT calculation if PF0 has more */
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
+ p_hwfn->p_dev->b_is_emul_full &&
+ p_hwfn->rel_pf_id && resc_num[ECORE_ILT])
+ resc_start[ECORE_ILT] += resc_num[ECORE_ILT];
+#endif
+
+ /* Sanity for ILT */
+ if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+ (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't assign ILT pages [%08x,...,%08x]\n",
+			  RESC_START(p_hwfn, ECORE_ILT),
+			  RESC_END(p_hwfn, ECORE_ILT) - 1);
+ return ECORE_INVAL;
+ }
+
+ ecore_hw_set_feat(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "The numbers for each resource are:\n"
+ "SB = %d start = %d\n"
+ "L2_QUEUE = %d start = %d\n"
+ "VPORT = %d start = %d\n"
+ "PQ = %d start = %d\n"
+ "RL = %d start = %d\n"
+ "MAC = %d start = %d\n"
+ "VLAN = %d start = %d\n"
+ "ILT = %d start = %d\n"
+ "CMDQS_CQS = %d start = %d\n",
+ RESC_NUM(p_hwfn, ECORE_SB), RESC_START(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
+ RESC_START(p_hwfn, ECORE_L2_QUEUE),
+ RESC_NUM(p_hwfn, ECORE_VPORT),
+ RESC_START(p_hwfn, ECORE_VPORT),
+ RESC_NUM(p_hwfn, ECORE_PQ), RESC_START(p_hwfn, ECORE_PQ),
+ RESC_NUM(p_hwfn, ECORE_RL), RESC_START(p_hwfn, ECORE_RL),
+ RESC_NUM(p_hwfn, ECORE_MAC), RESC_START(p_hwfn, ECORE_MAC),
+ RESC_NUM(p_hwfn, ECORE_VLAN),
+ RESC_START(p_hwfn, ECORE_VLAN),
+ RESC_NUM(p_hwfn, ECORE_ILT), RESC_START(p_hwfn, ECORE_ILT),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS),
+ RESC_START(p_hwfn, ECORE_CMDQS_CQS));
+
+ return ECORE_SUCCESS;
+}
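+
+/* The partition rule above is simply: each PF receives total/num_funcs
+ * of a resource, starting at resc_num * rel_pf_id. Worked example with
+ * made-up totals:
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint32_t total_l2_queues = 128;		/* hypothetical total */
+	uint8_t num_funcs = 4, rel_pf_id = 2;
+
+	uint32_t num = total_l2_queues / num_funcs;	/* 32 per PF */
+	uint32_t start = num * rel_pf_id;		/* PF2 starts at 64 */
+
+	/* prints: PF2 owns queues [64..95] */
+	printf("PF%u owns queues [%u..%u]\n",
+	       rel_pf_id, start, start + num - 1);
+	return 0;
+}
+#endif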
+
+static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ u32 port_cfg_addr, link_temp, device_capabilities;
+ struct ecore_mcp_link_params *link;
+
+ /* Read global nvm_cfg address */
+ u32 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+ /* Verify MCP has initialized it */
+ if (nvm_cfg_addr == 0) {
+ DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+ return ECORE_INVAL;
+ }
+
+	/* Read nvm_cfg1 (note this is just the offset, not the offsize) (TBD) */
+ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
+ core_cfg);
+
+ core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
+
+ switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
+ break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
+ core_cfg);
+ break;
+ }
+
+ /* Read default link configuration */
+ link = &p_hwfn->mcp_info->link_input;
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ link_temp = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
+ link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link->speed.advertised_speeds = link_temp;
+
+ link_temp = link->speed.advertised_speeds;
+ p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+
+ link_temp = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, link_settings));
+ switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+ link->speed.autoneg = true;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+ link->speed.forced_speed = 1000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+ link->speed.forced_speed = 10000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+ link->speed.forced_speed = 25000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+ link->speed.forced_speed = 40000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+ link->speed.forced_speed = 50000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ link->speed.forced_speed = 100000;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
+ }
+
+ link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+ link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+ link->pause.autoneg = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ link->pause.forced_rx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ link->pause.forced_tx = !!(link_temp &
+ NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ link->loopback_mode = 0;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x,"
+ " AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ link->speed.forced_speed, link->speed.advertised_speeds,
+ link->speed.autoneg, link->pause.autoneg);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+
+ generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
+
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+ p_hwfn->p_dev->mf_mode);
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+
+ device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
+ &p_hwfn->hw_info.device_capabilities);
+
+ return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
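+
+/* nvm_cfg fields above are located by adding OFFSETOF() results to the
+ * scratchpad base plus the nvm_cfg1 offset. A standalone sketch of the
+ * same addressing idiom; the struct layout here is invented purely for
+ * illustration:
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+#include <stddef.h>
+
+struct ex_glob { uint32_t core_cfg; uint32_t generic_cont0; };
+struct ex_cfg { uint32_t reserved; struct ex_glob glob; };
+
+static uint32_t ex_core_cfg_addr(uint32_t scratch_base, uint32_t cfg1_off)
+{
+	return scratch_base + cfg1_off +
+	       offsetof(struct ex_cfg, glob) +
+	       offsetof(struct ex_glob, core_cfg);
+}
+#endif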
+
+static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 num_funcs;
+ u32 tmp, mask;
+
+ num_funcs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_PFS_K2
+ : MAX_NUM_PFS_BB;
+
+ /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+ * in the other bits are selected.
+ * Bits 1-15 are for functions 1-15, respectively, and their value is
+	 * '0' only for enabled functions (function 0 always exists and is
+	 * enabled).
+ * In case of CMT, only the "even" functions are enabled, and thus the
+ * number of functions for both hwfns is learnt from the same bits.
+ */
+
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+ if (tmp & 0x1) {
+ if (ECORE_PATH_ID(p_hwfn) && p_hwfn->p_dev->num_hwfns == 1) {
+ num_funcs = 0;
+ mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ mask = 0x5554;
+ }
+
+ tmp = (tmp ^ 0xffffffff) & mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ num_funcs++;
+ tmp >>= 0x1;
+ }
+ }
+
+ p_hwfn->num_funcs_on_engine = num_funcs;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Limit number of PFs to 4 [would affect"
+ " resource allocation, needed for IOV]\n");
+ p_hwfn->num_funcs_on_engine = 4;
+ }
+#endif
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "num_funcs_on_engine = %d\n",
+ p_hwfn->num_funcs_on_engine);
+}
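+
+/* The loop above counts enabled functions by inverting FUNCTION_HIDE
+ * (enabled == '0'), masking the relevant bit positions (the mask already
+ * excludes bit 0), and counting set bits. Equivalent standalone sketch:
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+
+static uint8_t ex_count_enabled(uint32_t hide_reg, uint32_t mask)
+{
+	uint32_t tmp = (hide_reg ^ 0xffffffff) & mask;
+	uint8_t n = 1;	/* function 0 always exists and is enabled */
+
+	while (tmp) {
+		n += tmp & 0x1;
+		tmp >>= 1;
+	}
+	return n;
+}
+#endif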
+
+static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 port_mode;
+
+#ifndef ASIC_ONLY
+ /* Read the port mode */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ port_mode = 4;
+ else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
+ (p_hwfn->p_dev->num_hwfns > 1))
+ /* In CMT on emulation, assume 1 port */
+ port_mode = 1;
+ else
+#endif
+ port_mode = ecore_rd(p_hwfn, p_ptt,
+ CNIG_REG_NW_PORT_MODE_BB_B0);
+
+ if (port_mode < 3) {
+ p_hwfn->p_dev->num_ports_in_engines = 1;
+ } else if (port_mode <= 5) {
+ p_hwfn->p_dev->num_ports_in_engines = 2;
+ } else {
+ DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
+			  port_mode);
+
+ /* Default num_ports_in_engines to something */
+ p_hwfn->p_dev->num_ports_in_engines = 1;
+ }
+}
+
+static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 port;
+ int i;
+
+ p_hwfn->p_dev->num_ports_in_engines = 0;
+
+ for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+ port = ecore_rd(p_hwfn, p_ptt,
+ CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+ if (port & 1)
+ p_hwfn->p_dev->num_ports_in_engines++;
+ }
+}
+
+static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
+ else
+ ecore_hw_info_port_num_ah(p_hwfn, p_ptt);
+}
+
+static enum _ecore_status_t
+ecore_get_hw_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality personality)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_iov_hw_info(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ return rc;
+
+	/* TODO In get_hw_info, amongst others:
+	 * Get MCP FW revision and determine according to it the supported
+	 * features (e.g. DCB)
+	 * Get boot mode
+	 * ecore_get_pcie_width_speed, WOL capability.
+	 * Number of global CQ-s (for storage)
+ */
+ ecore_hw_info_port_num(p_hwfn, p_ptt);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+#endif
+ ecore_hw_get_nvm_info(p_hwfn, p_ptt);
+
+ rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
+#endif
+ OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
+ p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
+#ifndef ASIC_ONLY
+ } else {
+ static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 };
+
+ OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
+ p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
+ }
+#endif
+
+ if (ecore_mcp_is_init(p_hwfn)) {
+ if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
+ p_hwfn->hw_info.ovlan =
+ p_hwfn->mcp_info->func_info.ovlan;
+
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+
+ if (personality != ECORE_PCI_DEFAULT)
+ p_hwfn->hw_info.personality = personality;
+ else if (ecore_mcp_is_init(p_hwfn))
+ p_hwfn->hw_info.personality =
+ p_hwfn->mcp_info->func_info.protocol;
+
+#ifndef ASIC_ONLY
+	/* To overcome the ILT shortage on emulation, at least until we have a
+	 * definite answer from the system about it, allow only PF0 to be RoCE.
+ */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+#endif
+
+ ecore_get_num_funcs(p_hwfn, p_ptt);
+
+ /* Feat num is dependent on personality and on the number of functions
+ * on the engine. Therefore it should be come after personality
+ * initialization and after getting the number of functions.
+ */
+ return ecore_hw_get_resc(p_hwfn);
+}
+
+/* @TMP - this should move to a proper .h */
+#define CHIP_NUM_AH 0x8070
+
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 tmp;
+
+ /* Read Vendor Id / Device Id */
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
+ &p_dev->vendor_id);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
+ &p_dev->device_id);
+
+ p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CHIP_NUM);
+ p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CHIP_REV);
+
+ MASK_FIELD(CHIP_REV, p_dev->chip_rev);
+
+ /* Determine type */
+ if (p_dev->device_id == CHIP_NUM_AH)
+ p_dev->type = ECORE_DEV_TYPE_AH;
+ else
+ p_dev->type = ECORE_DEV_TYPE_BB;
+
+ /* Learn number of HW-functions */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+ if (tmp & (1 << p_hwfn->rel_pf_id)) {
+ DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
+ p_dev->num_hwfns = 2;
+ } else {
+ p_dev->num_hwfns = 1;
+ }
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ /* For some reason we have problems with this register
+ * in B0 emulation; Simply assume no CMT
+ */
+ DP_NOTICE(p_dev->hwfns, false,
+ "device on emul - assume no CMT\n");
+ p_dev->num_hwfns = 1;
+ }
+#endif
+
+ p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CHIP_TEST_REG) >> 4;
+ MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
+ p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_CHIP_METAL);
+ MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
+ DP_INFO(p_dev->hwfns,
+ "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x"
+ " Metal: %04x\n",
+ ECORE_IS_BB(p_dev) ? "BB" : "AH",
+ CHIP_REV_IS_A0(p_dev) ? 0 : 1,
+ p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
+ p_dev->chip_metal);
+
+ if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
+ DP_NOTICE(p_dev->hwfns, false,
+ "The chip type/rev (BB A0) is not supported!\n");
+ return ECORE_ABORTED;
+ }
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ MISCS_REG_ECO_RESERVED);
+ if (tmp & (1 << 29)) {
+ DP_NOTICE(p_hwfn, false,
+ "Emulation: Running on a FULL build\n");
+ p_dev->b_is_emul_full = true;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "Emulation: Running on a REDUCED build\n");
+ }
+ }
+#endif
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_prepare_hibernate(struct ecore_dev *p_dev)
+{
+ int j;
+
+ if (IS_VF(p_dev))
+ return;
+
+ for_each_hwfn(p_dev, j) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+ "Mark hw/fw uninitialized\n");
+
+ p_hwfn->hw_init_done = false;
+ p_hwfn->first_on_engine = false;
+ }
+}
+
+static enum _ecore_status_t
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM *p_regview,
+ void OSAL_IOMEM *p_doorbells,
+ enum ecore_pci_personality personality)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+ DP_ERR(p_hwfn,
+ "Reading the ME register returns all Fs;"
+ " Preventing further chip access\n");
+ return ECORE_INVAL;
+ }
+
+ get_function_id(p_hwfn);
+
+ /* Allocate PTT pool */
+ rc = ecore_ptt_pool_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+ goto err0;
+ }
+
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+ /* First hwfn learns basic information, e.g., number of hwfns */
+ if (!p_hwfn->my_id) {
+ rc = ecore_get_dev_info(p_hwfn->p_dev);
+ if (rc != ECORE_SUCCESS)
+ goto err1;
+ }
+
+ ecore_hw_hwfn_prepare(p_hwfn);
+
+ /* Initialize MCP structure */
+ rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+ goto err1;
+ }
+
+ /* Read the device configuration information from the HW and SHMEM */
+ rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
+ goto err2;
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = ecore_init_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+ goto err2;
+ }
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: workaround; Prevent DMAE parities\n");
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
+
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: workaround: Set VF bar0 size\n");
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_VF_BAR0_SIZE, 4);
+ }
+#endif
+
+ return rc;
+err2:
+ ecore_mcp_free(p_hwfn);
+err1:
+ ecore_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ enum _ecore_status_t rc;
+
+ if (IS_VF(p_dev))
+ return ecore_vf_hw_prepare(p_dev);
+
+ /* Store the precompiled init data ptrs */
+ ecore_init_iro_array(p_dev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = ecore_hw_prepare_single(p_hwfn,
+ p_dev->regview,
+ p_dev->doorbells, personality);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ personality = p_hwfn->hw_info.personality;
+
+	/* initialize the 2nd hwfn if necessary */
+ if (p_dev->num_hwfns > 1) {
+ void OSAL_IOMEM *p_regview, *p_doorbell;
+ u8 OSAL_IOMEM *addr;
+
+ /* adjust bar offset for second engine */
+ addr = (u8 OSAL_IOMEM *)p_dev->regview +
+ ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ p_regview = (void OSAL_IOMEM *)addr;
+
+ addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
+ ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ p_doorbell = (void OSAL_IOMEM *)addr;
+
+ /* prepare second hw function */
+ rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
+ p_doorbell, personality);
+
+ /* in case of error, need to free the previously
+		 * initialized hwfn 0
+ */
+ if (rc != ECORE_SUCCESS) {
+ ecore_init_free(p_hwfn);
+ ecore_mcp_free(p_hwfn);
+ ecore_hw_hwfn_free(p_hwfn);
+ return rc;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_hw_remove(struct ecore_dev *p_dev)
+{
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_release(p_hwfn);
+ continue;
+ }
+
+ ecore_init_free(p_hwfn);
+ ecore_hw_hwfn_free(p_hwfn);
+ ecore_mcp_free(p_hwfn);
+
+ OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+ }
+}
+
+static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
+ dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+ struct ecore_chain_next *p_next;
+ u32 size, i;
+
+ if (!p_virt)
+ return;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ if (!p_virt)
+ break;
+
+ p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
+ p_virt_next = p_next->next_virt;
+ p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+ OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+
+ p_virt = p_virt_next;
+ p_phys = p_phys_next;
+ }
+}
+
+static void ecore_chain_free_single(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ if (!p_chain->p_virt_addr)
+ return;
+
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
+ p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
+}
+
+static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table;
+ u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+
+ if (!pp_virt_addr_tbl)
+ return;
+
+ if (!p_chain->pbl.p_virt_table)
+ goto out;
+
+ for (i = 0; i < page_cnt; i++) {
+ if (!pp_virt_addr_tbl[i])
+ break;
+
+ OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
+ *(dma_addr_t *)p_pbl_virt,
+ ECORE_CHAIN_PAGE_SIZE);
+
+ p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
+ p_chain->pbl.p_phys_table, pbl_size);
+out:
+ OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
+}
+
+void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ switch (p_chain->mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ ecore_chain_free_next_ptr(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ ecore_chain_free_single(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ ecore_chain_free_pbl(p_dev, p_chain);
+ break;
+ }
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
+ enum ecore_chain_cnt_type cnt_type,
+ osal_size_t elem_size, u32 page_cnt)
+{
+ u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+ /* The actual chain size can be larger than the maximal possible value
+ * after rounding up the requested elements number to pages, and after
+	 * taking into account the unusable elements (next-ptr elements).
+ * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+ * size/capacity fields are of a u32 type.
+ */
+ if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
+ chain_size > ((u32)ECORE_U16_MAX + 1)) ||
+ (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
+ chain_size > ECORE_U32_MAX)) {
+ DP_NOTICE(p_dev, true,
+ "The actual chain size (0x%lx) is larger than"
+ " the maximal possible value\n",
+ (unsigned long)chain_size);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
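+
+/* Worked example of the capacity check above: a u16-counted chain may
+ * hold at most U16_MAX + 1 elements after page rounding. With the
+ * made-up numbers below, 512 elements/page * 129 pages = 66048 > 65536,
+ * so the allocation would be rejected.
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint64_t elems_per_page = 512;	/* hypothetical */
+	uint32_t page_cnt = 129;
+
+	uint64_t chain_size = elems_per_page * page_cnt;
+
+	if (chain_size > (uint64_t)UINT16_MAX + 1)
+		printf("too large for a u16 chain (%llu > 65536)\n",
+		       (unsigned long long)chain_size);
+	return 0;
+}
+#endif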
+
+static enum _ecore_status_t
+ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
+ dma_addr_t p_phys = 0;
+ u32 i;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ if (i == 0) {
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+ } else {
+ ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_virt, p_phys);
+ }
+
+ p_virt_prev = p_virt;
+ }
+ /* Last page's next element should point to the beginning of the
+ * chain.
+ */
+ ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+ void *p_virt = OSAL_NULL;
+ dma_addr_t p_phys = 0;
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain)
+{
+ void *p_virt = OSAL_NULL;
+ u8 *p_pbl_virt = OSAL_NULL;
+ void **pp_virt_addr_tbl = OSAL_NULL;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ u32 page_cnt = p_chain->page_cnt, size, i;
+
+ size = page_cnt * sizeof(*pp_virt_addr_tbl);
+ pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
+ if (!pp_virt_addr_tbl) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate memory for the chain"
+ " virtual addresses table\n");
+ return ECORE_NOMEM;
+ }
+ OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
+
+ /* The allocation of the PBL table is done with its full size, since it
+ * is expected to be successive.
+ */
+ size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+ p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+ if (!p_pbl_virt) {
+ DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
+ return ECORE_NOMEM;
+ }
+
+ ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+ pp_virt_addr_tbl);
+
+ for (i = 0; i < page_cnt; i++) {
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ ECORE_CHAIN_PAGE_SIZE);
+ if (!p_virt) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate chain memory\n");
+ return ECORE_NOMEM;
+ }
+
+ if (i == 0) {
+ ecore_chain_init_mem(p_chain, p_virt, p_phys);
+ ecore_chain_reset(p_chain);
+ }
+
+ /* Fill the PBL table with the physical address of the page */
+ *(dma_addr_t *)p_pbl_virt = p_phys;
+ /* Keep the virtual address of the page */
+ p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+ p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ return ECORE_SUCCESS;
+}
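+
+/* The PBL built above is just a contiguous table of per-page DMA
+ * addresses, so hardware can index the table instead of chasing
+ * next-pointers. Minimal standalone model; ex_dma_addr stands in for
+ * dma_addr_t:
+ */
+#if 0	/* example only - never compiled */
+#include <stdint.h>
+
+typedef uint64_t ex_dma_addr;
+
+struct ex_pbl {
+	ex_dma_addr *table;	/* table[i] = phys addr of page i */
+	uint32_t page_cnt;
+};
+
+static void ex_pbl_fill(struct ex_pbl *p, const ex_dma_addr *pages)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->page_cnt; i++)
+		p->table[i] = pages[i];
+}
+#endif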
+
+enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ u32 num_elems, osal_size_t elem_size,
+ struct ecore_chain *p_chain)
+{
+ u32 page_cnt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (mode == ECORE_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+ rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
+ page_cnt);
+ if (rc) {
+ DP_NOTICE(p_dev, true,
+ "Cannot allocate a chain with the given arguments:\n"
+ " [use_mode %d, mode %d, cnt_type %d, num_elems %d,"
+ " elem_size %zu]\n",
+ intended_use, mode, cnt_type, num_elems, elem_size);
+ return rc;
+ }
+
+ ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
+ mode, cnt_type);
+
+ switch (mode) {
+ case ECORE_CHAIN_MODE_NEXT_PTR:
+ rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_SINGLE:
+ rc = ecore_chain_alloc_single(p_dev, p_chain);
+ break;
+ case ECORE_CHAIN_MODE_PBL:
+ rc = ecore_chain_alloc_pbl(p_dev, p_chain);
+ break;
+ }
+ if (rc)
+ goto nomem;
+
+ return ECORE_SUCCESS;
+
+nomem:
+ ecore_chain_free(p_dev, p_chain);
+ return rc;
+}
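+
+/* Hypothetical usage sketch of the allocator above; the parameters a
+ * real caller passes depend on the ring being created, and the enum
+ * values shown are assumed from ecore_chain.h:
+ */
+#if 0	/* example only - never compiled */
+static enum _ecore_status_t ex_alloc_ring(struct ecore_dev *p_dev,
+					  struct ecore_chain *p_chain)
+{
+	return ecore_chain_alloc(p_dev,
+				 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+				 ECORE_CHAIN_MODE_PBL,
+				 ECORE_CHAIN_CNT_TYPE_U16,
+				 256,		/* num_elems */
+				 sizeof(u64),	/* elem_size */
+				 p_chain);
+}
+#endif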
+
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+ u16 src_id, u16 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ u16 min, max;
+
+ min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ DP_NOTICE(p_hwfn, true,
+ "l2_queue id [%d] is not valid, available"
+ " indices [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
+ DP_NOTICE(p_hwfn, true,
+ "vport id [%d] is not valid, available"
+ " indices [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
+ DP_NOTICE(p_hwfn, true,
+ "rss_eng id [%d] is not valid,avail idx [%d - %d]\n",
+ src_id, min, max);
+
+ return ECORE_INVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter)
+{
+ u32 high, low, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return ECORE_SUCCESS;
+
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %x:%x:%x:%x:%x:%x is added at %d\n",
+ p_filter[0], p_filter[1], p_filter[2],
+ p_filter[3], p_filter[4], p_filter[5], i);
+
+ return ECORE_SUCCESS;
+}
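+
+/* Worked example of the high/low packing used above. For the MAC
+ * aa:bb:cc:dd:ee:ff (p_filter[0] = 0xaa, ..., p_filter[5] = 0xff):
+ *
+ *	high = p_filter[1] | (p_filter[0] << 8)            = 0x0000aabb
+ *	low  = p_filter[5] | (p_filter[4] << 8) |
+ *	       (p_filter[3] << 16) | (p_filter[2] << 24)   = 0xccddeeff
+ *
+ * i.e. the first two MAC bytes land in the high FILTER_VALUE dword and the
+ * remaining four in the low dword.
+ */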
+
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high, low;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter\n");
+}
+
+enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 filter)
+{
+ u32 high, low, en;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return ECORE_SUCCESS;
+
+ high = filter;
+ low = 0;
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ETH type: %x is added at %d\n", filter, i);
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 filter)
+{
+ u32 high, low;
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ high = filter;
+ low = 0;
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter\n");
+}
+
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ int i;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE +
+ (2 * i + 1) * sizeof(u32), 0);
+ }
+}
+
+enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 reg_tbl[] = {
+ BRB_REG_HEADER_SIZE,
+ BTB_REG_HEADER_SIZE,
+ CAU_REG_LONG_TIMEOUT_THRESHOLD,
+ CCFC_REG_ACTIVITY_COUNTER,
+ CDU_REG_CID_ADDR_PARAMS,
+ DBG_REG_CLIENT_ENABLE,
+ DMAE_REG_INIT,
+ DORQ_REG_IFEN,
+ GRC_REG_TIMEOUT_EN,
+ IGU_REG_BLOCK_CONFIGURATION,
+ MCM_REG_INIT,
+ MCP2_REG_DBG_DWORD_ENABLE,
+ MISC_REG_PORT_MODE,
+ MISCS_REG_CLK_100G_MODE,
+ MSDM_REG_ENABLE_IN1,
+ MSEM_REG_ENABLE_IN,
+ NIG_REG_CM_HDR,
+ NCSI_REG_CONFIG,
+ PBF_REG_INIT,
+ PTU_REG_ATC_INIT_ARRAY,
+ PCM_REG_INIT,
+ PGLUE_B_REG_ADMIN_PER_PF_REGION,
+ PRM_REG_DISABLE_PRM,
+ PRS_REG_SOFT_RST,
+ PSDM_REG_ENABLE_IN1,
+ PSEM_REG_ENABLE_IN,
+ PSWRQ_REG_DBG_SELECT,
+ PSWRQ2_REG_CDUT_P_SIZE,
+ PSWHST_REG_DISCARD_INTERNAL_WRITES,
+ PSWHST2_REG_DBGSYN_ALMOST_FULL_THR,
+ PSWRD_REG_DBG_SELECT,
+ PSWRD2_REG_CONF11,
+ PSWWR_REG_USDM_FULL_TH,
+ PSWWR2_REG_CDU_FULL_TH2,
+ QM_REG_MAXPQSIZE_0,
+ RSS_REG_RSS_INIT_EN,
+ RDIF_REG_STOP_ON_ERROR,
+ SRC_REG_SOFT_RST,
+ TCFC_REG_ACTIVITY_COUNTER,
+ TCM_REG_INIT,
+ TM_REG_PXP_READ_DATA_FIFO_INIT,
+ TSDM_REG_ENABLE_IN1,
+ TSEM_REG_ENABLE_IN,
+ TDIF_REG_STOP_ON_ERROR,
+ UCM_REG_INIT,
+ UMAC_REG_IPG_HD_BKP_CNTL_BB_B0,
+ USDM_REG_ENABLE_IN1,
+ USEM_REG_ENABLE_IN,
+ XCM_REG_INIT,
+ XSDM_REG_ENABLE_IN1,
+ XSEM_REG_ENABLE_IN,
+ YCM_REG_INIT,
+ YSDM_REG_ENABLE_IN1,
+ YSEM_REG_ENABLE_IN,
+ XYLD_REG_SCBD_STRICT_PRIO,
+ TMLD_REG_SCBD_STRICT_PRIO,
+ MULD_REG_SCBD_STRICT_PRIO,
+ YULD_REG_SCBD_STRICT_PRIO,
+ };
+ u32 test_val[] = { 0x0, 0x1 };
+ u32 val, save_val, i, j;
+
+ for (i = 0; i < OSAL_ARRAY_SIZE(test_val); i++) {
+ for (j = 0; j < OSAL_ARRAY_SIZE(reg_tbl); j++) {
+ save_val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
+ ecore_wr(p_hwfn, p_ptt, reg_tbl[j], test_val[i]);
+ val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
+ /* Restore the original register's value */
+ ecore_wr(p_hwfn, p_ptt, reg_tbl[j], save_val);
+ if (val != test_val[i]) {
+ DP_INFO(p_hwfn->p_dev,
+ "offset 0x%x: val 0x%x != 0x%x\n",
+ reg_tbl[j], val, test_val[i]);
+ return ECORE_AGAIN;
+ }
+ }
+ }
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr, void *p_qzone,
+ osal_size_t qzone_size,
+ u8 timeset)
+{
+ struct coalescing_timeset *p_coalesce_timeset;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
+ return ECORE_INVAL;
+ }
+
+ if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
+ DP_NOTICE(p_hwfn, true,
+ "Coalescing configuration not enabled\n");
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEMSET(p_qzone, 0, qzone_size);
+ p_coalesce_timeset = p_qzone;
+ p_coalesce_timeset->timeset = timeset;
+ p_coalesce_timeset->valid = 1;
+ ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_qzone, qzone_size);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 coalesce, u8 qid)
+{
+ struct ustorm_eth_queue_zone qzone;
+ u16 fw_qid = 0;
+ u32 address;
+ u8 timeset;
+ enum _ecore_status_t rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+ /* Translate the coalescing time into a timeset, according to:
+ * Timeout[Rx] = TimeSet[Rx] << (TimerRes[Rx] + 1)
+ */
+ timeset = coalesce >> (ECORE_CAU_DEF_RX_TIMER_RES + 1);
+
+ rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
+ sizeof(struct ustorm_eth_queue_zone), timeset);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
+out:
+ return rc;
+}
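+
+/* Worked example of the timeset translation above, assuming a hypothetical
+ * ECORE_CAU_DEF_RX_TIMER_RES of 1 (the actual value is defined elsewhere):
+ * coalesce = 96 usec gives timeset = 96 >> (1 + 1) = 24, and the firmware
+ * reconstructs Timeout[Rx] = 24 << (1 + 1) = 96 usec.
+ */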
+
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 coalesce, u8 qid)
+{
+ struct ystorm_eth_queue_zone qzone;
+ u16 fw_qid = 0;
+ u32 address;
+ u8 timeset;
+ enum _ecore_status_t rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ address = BAR0_MAP_REG_YSDM_RAM + YSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+ /* Translate the coalescing time into a timeset, according to:
+ * Timeout[Tx] = TimeSet[Tx] << (TimerRes[Tx] + 1)
+ */
+ timeset = coalesce >> (ECORE_CAU_DEF_TX_TIMER_RES + 1);
+
+ rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
+ sizeof(struct ystorm_eth_queue_zone), timeset);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
+out:
+ return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have an approximate
+ * min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
+ */
+static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i, num_vports;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ for (i = 0; i < num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ vport_params[i].vport_wfq =
+ (wfq_speed * ECORE_WFQ_UNIT) / min_pf_rate;
+ ecore_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+static void
+ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+{
+ int i, num_vports;
+ u32 min_speed;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+ min_speed = min_pf_rate / num_vports;
+
+ for (i = 0; i < num_vports; i++) {
+ p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+ p_hwfn->qm_info.wfq_data[i].default_min_speed = min_speed;
+ }
+}
+
+static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i, num_vports;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ for (i = 0; i < num_vports; i++) {
+ ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+ ecore_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+/* validate wfq for a given vport and required min rate */
+static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
+ u16 vport_id, u32 req_rate,
+ u32 min_pf_rate)
+{
+ u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+ int non_requested_count = 0, req_count = 0, i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ /* Check pre-set data for some of the vports */
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+
+ if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
+ req_count++;
+ tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+ total_req_min_rate += tmp_speed;
+ }
+ }
+
+ /* Include current vport data as well */
+ req_count++;
+ total_req_min_rate += req_rate;
+ non_requested_count = p_hwfn->qm_info.num_vports - req_count;
+
+ /* validate possible error cases */
+ if (req_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is greater"
+ " than configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ if (req_rate * ECORE_WFQ_UNIT / min_pf_rate < 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is less than"
+ " one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ /* TBD - for number of vports greater than 100 */
+ if (ECORE_WFQ_UNIT / p_hwfn->qm_info.num_vports < 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Number of vports are greater than 100\n");
+ return ECORE_INVAL;
+ }
+
+ if (total_req_min_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Total requested min rate for all vports[%d Mbps]"
+ "is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
+ return ECORE_INVAL;
+ }
+
+ /* Data left for non requested vports */
+ total_left_rate = min_pf_rate - total_req_min_rate;
+ left_rate_per_vp = total_left_rate / non_requested_count;
+
+	/* Fail if non-requested vports would get less than 1% of min bw */
+ if (left_rate_per_vp * ECORE_WFQ_UNIT / min_pf_rate < 1)
+ return ECORE_INVAL;
+
+ /* now req_rate for given vport passes all scenarios.
+ * assign final wfq rates to all vports.
+ */
+ p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+ p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+ }
+
+ return ECORE_SUCCESS;
+}
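+
+/* Worked example of the validation above, assuming a hypothetical
+ * ECORE_WFQ_UNIT of 100 (consistent with the "one percent" checks): with
+ * min_pf_rate = 10000 Mbps, 4 vports and req_rate = 4000 Mbps for vport 0,
+ * total_left_rate = 6000 Mbps is split as 2000 Mbps per non-requested
+ * vport; 2000 * 100 / 10000 = 20 >= 1, so the request is accepted.
+ */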
+
+static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 vp_id, u32 rate)
+{
+ struct ecore_mcp_link_state *p_link;
+ int rc = ECORE_SUCCESS;
+
+ p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
+
+ if (!p_link->min_pf_rate) {
+ p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+ p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+ return rc;
+ }
+
+ rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+ else
+ DP_NOTICE(p_hwfn, false,
+ "Validation failed while configuring min rate\n");
+
+ return rc;
+}
+
+static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ int rc = ECORE_SUCCESS;
+ bool use_wfq = false;
+ u16 i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ /* Validate all pre configured vports for wfq */
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured) {
+ u32 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ use_wfq = true;
+ rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc == ECORE_INVAL) {
+ DP_NOTICE(p_hwfn, false,
+ "Validation failed while"
+ " configuring min rate\n");
+ break;
+ }
+ }
+ }
+
+ if (rc == ECORE_SUCCESS && use_wfq)
+ ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ else
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+ return rc;
+}
+
+/* Main API for ecore clients to configure vport min rate.
+ * vp_id - vport id in PF range [0 - (total_num_vports_per_pf - 1)]
+ * rate - speed in Mbps to be assigned to a given vport.
+ */
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
+{
+ int i, rc = ECORE_INVAL;
+
+	/* TBD - for multiple hardware functions, i.e. 100G devices */
+ if (p_dev->num_hwfns > 1) {
+ DP_NOTICE(p_dev, false,
+ "WFQ configuration is not supported for this dev\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_ptt *p_ptt;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+ if (rc != ECORE_SUCCESS) {
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ u32 min_pf_rate)
+{
+ int i;
+
+	/* TBD - for multiple hardware functions, i.e. 100G devices */
+ if (p_dev->num_hwfns > 1) {
+ DP_VERBOSE(p_dev, ECORE_MSG_LINK,
+ "WFQ configuration is not supported for this dev\n");
+ return;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+ p_hwfn->p_dpc_ptt,
+ min_pf_rate);
+ }
+}
+
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 max_bw)
+{
+ int rc = ECORE_SUCCESS;
+
+ p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->speed = (p_link->line_speed * max_bw) / 100;
+
+ rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_link->speed);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+
+ return rc;
+}
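+
+/* Worked example of the scaling above: with line_speed = 25000 Mbps and
+ * max_bw = 40, p_link->speed becomes 25000 * 40 / 100 = 10000 Mbps, which
+ * is then programmed as the PF rate limit via ecore_init_pf_rl().
+ */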
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
+{
+ int i, rc = ECORE_INVAL;
+
+ if (max_bw < 1 || max_bw > 100) {
+ DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_link_state *p_link;
+ struct ecore_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+ if (rc != ECORE_SUCCESS) {
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 min_bw)
+{
+ int rc = ECORE_SUCCESS;
+
+ p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+ rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configured MIN bandwidth to be %d Mb/sec\n",
+ p_link->min_pf_rate);
+
+ return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
+{
+ int i, rc = ECORE_INVAL;
+
+ if (min_bw < 1 || min_bw > 100) {
+ DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_link_state *p_link;
+ struct ecore_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
+
+ rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ if (rc != ECORE_SUCCESS) {
+ ecore_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ if (p_link->min_pf_rate) {
+ u32 min_rate = p_link->min_pf_rate;
+
+ rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+ p_ptt,
+ min_rate);
+ }
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_link_state *p_link;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+
+ if (p_link->min_pf_rate)
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+
+ OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
+ sizeof(*p_hwfn->qm_info.wfq_data) *
+ p_hwfn->qm_info.num_vports);
+}
+
+int ecore_device_num_engines(struct ecore_dev *p_dev)
+{
+ return ECORE_IS_BB(p_dev) ? 2 : 1;
+}
+
+int ecore_device_num_ports(struct ecore_dev *p_dev)
+{
+	/* In CMT mode there is always only one port */
+ if (p_dev->num_hwfns > 1)
+ return 1;
+
+ return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
+}
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
new file mode 100644
index 00000000..535b82b5
--- /dev/null
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DEV_API_H__
+#define __ECORE_DEV_API_H__
+
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_int_api.h"
+
+struct ecore_tunn_start_params;
+
+/**
+ * @brief ecore_init_dp - initialize the debug level
+ *
+ * @param p_dev
+ * @param dp_module
+ * @param dp_level
+ * @param dp_ctx
+ */
+void ecore_init_dp(struct ecore_dev *p_dev,
+ u32 dp_module, u8 dp_level, void *dp_ctx);
+
+/**
+ * @brief ecore_init_struct - initialize the device structure to
+ * its defaults
+ *
+ * @param p_dev
+ */
+void ecore_init_struct(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_free -
+ *
+ * @param p_dev
+ */
+void ecore_resc_free(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_alloc -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_setup -
+ *
+ * @param p_dev
+ */
+void ecore_resc_setup(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_init -
+ *
+ * @param p_dev
+ * @param p_tunn - tunneling parameters
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ * for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ * Pass NULL if not using binary fw file.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+ struct ecore_tunn_start_params *p_tunn,
+ bool b_hw_start,
+ enum ecore_int_mode int_mode,
+ bool allow_npar_tx_switch,
+ const u8 *bin_fw_data);
+
+/**
+ * @brief ecore_hw_timers_stop_all -
+ *
+ * @param p_dev
+ *
+ * @return void
+ */
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop_fastpath - should be called in case
+ * slowpath is still required for the device, but
+ * fastpath is not.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_prepare_hibernate - should be called when
+ * the system is going into the hibernate state
+ *
+ * @param p_dev
+ *
+ */
+void ecore_prepare_hibernate(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_start_fastpath - restart fastpath traffic,
+ *		only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_hw_reset -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_prepare -
+ *
+ * @param p_dev
+ * @param personality - personality to initialize
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality);
+
+/**
+ * @brief ecore_hw_remove -
+ *
+ * @param p_dev
+ */
+void ecore_hw_remove(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct ecore_ptt *
+ */
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+#ifndef __EXTRACT__LINUX__
+struct ecore_eth_stats {
+ u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 tpa_coalesced_pkts;
+ u64 tpa_coalesced_events;
+ u64 tpa_aborts_num;
+ u64 tpa_not_coalesced_pkts;
+ u64 tpa_coalesced_bytes;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 rx_mac_bytes;
+ u64 rx_mac_uc_packets;
+ u64 rx_mac_mc_packets;
+ u64 rx_mac_bc_packets;
+ u64 rx_mac_frames_ok;
+ u64 tx_mac_bytes;
+ u64 tx_mac_uc_packets;
+ u64 tx_mac_mc_packets;
+ u64 tx_mac_bc_packets;
+ u64 tx_mac_ctrl_frames;
+};
+#endif
+
+enum ecore_dmae_address_type_t {
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ ECORE_DMAE_ADDRESS_GRC
+};
+
+/* Values for the flags field below. If the ECORE_DMAE_FLAG_RW_REPL_SRC
+ * flag is set and the source is a block of length DMAE_MAX_RW_SIZE while
+ * the destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is used
+ * mostly to write a zeroed buffer to a destination address using DMA.
+ */
+#define ECORE_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define ECORE_DMAE_FLAG_VF_SRC 0x00000002
+#define ECORE_DMAE_FLAG_VF_DST 0x00000004
+#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+struct ecore_dmae_params {
+ u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
+ u8 src_vfid;
+ u8 dst_vfid;
+};
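+
+/* Example (sketch): zeroing a device-visible buffer with the
+ * replicate-source flag described above. The buffer addresses and length
+ * are hypothetical; size_in_dwords counts 4-byte words.
+ *
+ *	struct ecore_dmae_params params = { 0 };
+ *	enum _ecore_status_t rc;
+ *
+ *	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
+ *	rc = ecore_dmae_host2host(p_hwfn, p_ptt, zeroed_block_phys,
+ *				  dest_phys, dest_len_bytes / 4, &params);
+ */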
+
+/**
+ * @brief ecore_dmae_host2grc - copy data from a source address to
+ *        dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr, u32 size_in_dwords, u32 flags);
+
+/**
+ * @brief ecore_dmae_grc2host - read data from dmae data offset
+ *        into the destination address using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags);
+
+/**
+ * @brief ecore_dmae_host2host - copy data from a source address
+ *        to a destination address (used for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct ecore_dmae_params *p_params);
+
+/**
+ * @brief ecore_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_hwfn
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_chain_alloc(struct ecore_dev *p_dev,
+ enum ecore_chain_use_mode intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ u32 num_elems,
+ osal_size_t elem_size, struct ecore_chain *p_chain);
+
+/**
+ * @brief ecore_chain_free - Free chain DMA memory
+ *
+ * @param p_hwfn
+ * @param p_chain
+ */
+void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain);
+
+/**
+ * @brief ecore_fw_l2_queue - Get absolute L2 queue ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+ u16 src_id, u16 *dst_id);
+
+/**
+ * @brief ecore_fw_vport - Get absolute vport ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id);
+
+/**
+ * @brief ecore_fw_rss_eng - Get absolute RSS engine ID
+ *
+ * @param p_hwfn
+ * @param src_id - relative to p_hwfn
+ * @param dst_id - absolute per engine
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+ u8 src_id, u8 *dst_id);
+
+/**
+ * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter);
+
+/**
+ * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief ecore_llh_add_ethertype_filter - configures an ethertype filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param filter - ethertype to add
+ */
+enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 filter);
+
+/**
+ * @brief ecore_llh_remove_ethertype_filter - removes an ethertype llh filter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param filter - ethertype to remove
+ */
+void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 filter);
+
+/**
+ * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_final_cleanup - Cleanup of previous driver remains
+ *        prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 id, bool is_vf);
+
+/**
+ * @brief ecore_test_registers - Perform register tests
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 coalesce, u8 qid);
+
+/**
+ * @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 coalesce, u8 qid);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
new file mode 100644
index 00000000..cc49fc7b
--- /dev/null
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef GTT_REG_ADDR_H
+#define GTT_REG_ADDR_H
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
+
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
+
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
+
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
+
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
+
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+
+#endif
diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h
new file mode 100644
index 00000000..f2efe24e
--- /dev/null
+++ b/drivers/net/qede/base/ecore_gtt_values.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __PREVENT_PXP_GLOBAL_WIN__
+
+static u32 pxp_global_win[] = {
+ 0,
+ 0,
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+ 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+#endif /* __PREVENT_PXP_GLOBAL_WIN__ */
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
new file mode 100644
index 00000000..e341b95c
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_COMMON__
+#define __ECORE_HSI_COMMON__
+/********************************/
+/* Add include to common target */
+/********************************/
+#include "common_hsi.h"
+
+/*
+ * opcodes for the event ring
+ */
+enum common_event_opcode {
+ COMMON_EVENT_PF_START,
+ COMMON_EVENT_PF_STOP,
+ COMMON_EVENT_VF_START,
+ COMMON_EVENT_VF_STOP,
+ COMMON_EVENT_VF_PF_CHANNEL,
+ COMMON_EVENT_VF_FLR,
+ COMMON_EVENT_PF_UPDATE,
+ COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_EMPTY,
+ MAX_COMMON_EVENT_OPCODE
+};
+
+/*
+ * Common Ramrod Command IDs
+ */
+enum common_ramrod_cmd_id {
+ COMMON_RAMROD_UNUSED,
+ COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+ COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_VF_START /* VF Function Start */,
+ COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
+ COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
+ COMMON_RAMROD_EMPTY /* Empty Ramrod */,
+ MAX_COMMON_RAMROD_CMD_ID
+};
+
+/*
+ * The core storm context for the Ystorm
+ */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * The core storm context for the Pstorm
+ */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * Core Slowpath Connection storm context of Xstorm
+ */
+struct xstorm_core_conn_st_ctx {
+ __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+ __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+	struct regpair consolid_base_addr /* Consolidation Ring Base Address */;
+ __le16 spq_cons /* SPQ Ring Consumer */;
+ __le16 consolid_cons /* Consolidation Ring Consumer */;
+ __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct xstorm_core_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 core_state /* state */;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2 /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 consolid_prod /* physical_q1 */;
+ __le16 reserved16 /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_or_spq_prod /* word4 */;
+ __le16 word5 /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+struct tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
+/*
+ * The core storm context for the Mstorm
+ */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/*
+ * The core storm context for the Ustorm
+ */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * core connection context
+ */
+struct core_conn_context {
+	struct ystorm_core_conn_st_ctx ystorm_st_context /* ystorm storm context */;
+	struct regpair ystorm_st_padding[2] /* padding */;
+	struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
+	struct regpair pstorm_st_padding[2] /* padding */;
+	struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
+	struct xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
+	struct tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
+	struct ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
+	struct mstorm_core_conn_st_ctx mstorm_st_context /* mstorm storm context */;
+	struct ustorm_core_conn_st_ctx ustorm_st_context /* ustorm storm context */;
+	struct regpair ustorm_st_padding[2] /* padding */;
+};
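+
+/* Illustrative sketch (assumption, not from the imported header): the
+ * storm contexts above are laid out back-to-back, with regpair padding
+ * keeping each sub-context 8-byte aligned. A hypothetical compile-time
+ * sanity check would be:
+ *
+ *   _Static_assert(sizeof(struct core_conn_context) % 8 == 0,
+ *                  "context size must be a whole number of regpairs");
+ */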
+
+/*
+ * How LL2 should deal with a packet upon error
+ */
+enum core_error_handle {
+	LL2_DROP_PACKET /* If an error occurs, drop the packet */,
+	LL2_DO_NOTHING /* If an error occurs, do nothing */,
+	LL2_ASSERT /* If an error occurs, assert */,
+ MAX_CORE_ERROR_HANDLE
+};
+
+/*
+ * opcodes for the event ring
+ */
+enum core_event_opcode {
+ CORE_EVENT_TX_QUEUE_START,
+ CORE_EVENT_TX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_START,
+ CORE_EVENT_RX_QUEUE_STOP,
+ MAX_CORE_EVENT_OPCODE
+};
+
+/*
+ * The L4 pseudo checksum mode for Core
+ */
+enum core_l4_pseudo_checksum_mode {
+	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH
+	/* Pseudo Checksum on packet is calculated with the correct length. */,
+	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH
+	/* Pseudo Checksum on packet is calculated with zero length. */,
+ MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+/*
+ * Light-L2 per-port GSI error counters (in Tstorm RAM)
+ */
+struct core_ll2_port_stats {
+ struct regpair gsi_invalid_hdr;
+ struct regpair gsi_invalid_pkt_length;
+ struct regpair gsi_unsupported_pkt_typ;
+ struct regpair gsi_crcchksm_error;
+};
+
+/*
+ * Light L2 TX Per Queue Stats
+ */
+struct core_ll2_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes
+ /* number of total bytes sent without errors */;
+ struct regpair sent_mcast_bytes
+ /* number of total bytes sent without errors */;
+ struct regpair sent_bcast_bytes
+ /* number of total bytes sent without errors */;
+ struct regpair sent_ucast_pkts
+ /* number of total packets sent without errors */;
+ struct regpair sent_mcast_pkts
+ /* number of total packets sent without errors */;
+ struct regpair sent_bcast_pkts
+ /* number of total packets sent without errors */;
+};
+
+/*
+ * Light-L2 RX Producers in Tstorm RAM
+ */
+struct core_ll2_rx_prod {
+ __le16 bd_prod /* BD Producer */;
+ __le16 cqe_prod /* CQE Producer */;
+ __le32 reserved;
+};
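+
+/* Illustrative sketch (assumption, not from the imported header): a
+ * driver advertises newly posted RX buffers by advancing these
+ * producers; cpu_to_le16 stands in for whatever endianness helper the
+ * surrounding code actually uses:
+ *
+ *   prod->bd_prod = cpu_to_le16(bd_prod);
+ *   prod->cqe_prod = cpu_to_le16(cqe_prod);
+ */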
+
+struct core_ll2_tstorm_per_queue_stat {
+ struct regpair packet_too_big_discard
+ /* Number of packets discarded because they are bigger than MTU */;
+ struct regpair no_buff_discard
+ /* Number of packets discarded due to lack of host buffers */;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+/*
+ * Core Ramrod Command IDs (light L2)
+ */
+enum core_ramrod_cmd_id {
+ CORE_RAMROD_UNUSED,
+ CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ MAX_CORE_RAMROD_CMD_ID
+};
+
+/*
+ * Specifies how ll2 should deal with packet errors: packet_too_big and no_buff
+ */
+struct core_rx_action_on_error {
+ u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
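+
+/* Illustrative sketch (assumption, not from the imported header): both
+ * fields take enum core_error_handle values, e.g. drop oversized
+ * packets but assert when host buffers run out:
+ *
+ *   act->error_type =
+ *       (LL2_DROP_PACKET << CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT) |
+ *       (LL2_ASSERT << CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT);
+ */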
+
+/*
+ * Core RX BD for Light L2
+ */
+struct core_rx_bd {
+ struct regpair addr;
+ __le16 reserved[4];
+};
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+struct core_rx_bd_with_buff_len {
+ struct regpair addr;
+ __le16 buff_length;
+ __le16 reserved[3];
+};
+
+/*
+ * Core RX BD union for Light L2 (static vs. dynamic buffer length)
+ */
+union core_rx_bd_union {
+ struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
+ struct core_rx_bd_with_buff_len rx_bd_with_len
+ /* Core Rx Bd with dynamic buffer length */;
+};
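+
+/* Illustrative sketch (assumption, not from the imported header):
+ * posting a buffer through the dynamic-length form, assuming the usual
+ * regpair {lo, hi} layout and hypothetical endianness helpers:
+ *
+ *   bd->rx_bd_with_len.addr.hi = cpu_to_le32((u32)(dma_addr >> 32));
+ *   bd->rx_bd_with_len.addr.lo = cpu_to_le32((u32)dma_addr);
+ *   bd->rx_bd_with_len.buff_length = cpu_to_le16(buf_len);
+ */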
+
+/*
+ * Opaque Data for Light L2 RX CQE
+ */
+struct core_rx_cqe_opaque_data {
+ __le32 data[2] /* Opaque CQE Data */;
+};
+
+/*
+ * Core RX CQE Type for Light L2
+ */
+enum core_rx_cqe_type {
+ CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
+ CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
+ CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
+ CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
+ MAX_CORE_RX_CQE_TYPE
+};
+
+/*
+ * Core RX fast-path CQE for Light L2
+ */
+struct core_rx_fast_path_cqe {
+ u8 type /* CQE type */;
+ u8 placement_offset
+ /* Offset (in bytes) of the packet from start of the buffer */;
+ struct parsing_and_err_flags parse_flags
+ /* Parsing and error flags from the parser */;
+ __le16 packet_length /* Total packet length (from the parser) */;
+ __le16 vlan /* 802.1q VLAN tag */;
+ struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+ __le32 reserved[4];
+};
+
+/*
+ * Core RX CM offload (GSI) CQE
+ */
+struct core_rx_gsi_offload_cqe {
+ u8 type /* CQE type */;
+ u8 data_length_error /* set if gsi data is bigger than buff */;
+ struct parsing_and_err_flags parse_flags
+ /* Parsing and error flags from the parser */;
+ __le16 data_length /* Total packet length (from the parser) */;
+ __le16 vlan /* 802.1q VLAN tag */;
+ __le32 src_mac_addrhi /* hi 4 bytes source mac address */;
+ __le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
+ u8 reserved1[2];
+ __le32 gid_dst[4] /* Gid destination address */;
+};
+
+/*
+ * Core RX slow-path CQE for Light L2
+ */
+struct core_rx_slow_path_cqe {
+ u8 type /* CQE type */;
+ u8 ramrod_cmd_id;
+ __le16 echo;
+ __le32 reserved1[7];
+};
+
+/*
+ * Core RX CQE union for Light L2
+ */
+union core_rx_cqe_union {
+ struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
+ struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
+ struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
+};
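+
+/* Illustrative sketch (assumption, not from the imported header):
+ * completions are dispatched on the leading type byte, an
+ * enum core_rx_cqe_type value (handle_* are hypothetical callbacks):
+ *
+ *   switch (cqe->rx_cqe_sp.type) {
+ *   case CORE_RX_CQE_TYPE_REGULAR:     handle_fp(&cqe->rx_cqe_fp); break;
+ *   case CORE_RX_CQE_TYPE_GSI_OFFLOAD: handle_gsi(&cqe->rx_cqe_gsi); break;
+ *   case CORE_RX_CQE_TYPE_SLOW_PATH:   handle_sp(&cqe->rx_cqe_sp); break;
+ *   }
+ */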
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct core_rx_start_ramrod_data {
+ struct regpair bd_base /* bd address of the first bd page */;
+ struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+ __le16 mtu /* Maximum transmission unit */;
+ __le16 sb_id /* Status block ID */;
+	u8 sb_index /* Status block protocol index */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
+ __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+ u8 inner_vlan_removal_en
+ /* if set, 802.1q tags will be removed and copied to CQE */;
+ u8 queue_id /* Light L2 RX Queue ID */;
+ u8 main_func_queue /* Is this the main queue for the PF */;
+ u8 mf_si_bcast_accept_all;
+ u8 mf_si_mcast_accept_all;
+ struct core_rx_action_on_error action_on_error;
+ u8 gsi_offload_flag
+ /* set when in GSI offload mode on ROCE connection */;
+ u8 reserved[7];
+};
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct core_rx_stop_ramrod_data {
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 queue_id /* Light L2 RX Queue ID */;
+ u8 reserved1;
+ __le16 reserved2[2];
+};
+
+/*
+ * Flags for Core TX BD
+ */
+struct core_tx_bd_flags {
+ u8 as_bitfield;
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
+#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+};
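+
+/* Illustrative sketch (assumption, not from the imported header):
+ * marking the first BD of a packet and requesting IP + L4 checksum
+ * offload in the bitfield byte:
+ *
+ *   bd_flags.as_bitfield =
+ *       (1 << CORE_TX_BD_FLAGS_START_BD_SHIFT) |
+ *       (1 << CORE_TX_BD_FLAGS_IP_CSUM_SHIFT) |
+ *       (1 << CORE_TX_BD_FLAGS_L4_CSUM_SHIFT);
+ */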
+
+/*
+ * Core TX BD for Light L2
+ */
+struct core_tx_bd {
+ struct regpair addr /* Buffer Address */;
+ __le16 nbytes /* Number of Bytes in Buffer */;
+ __le16 vlan /* VLAN to insert to packet (if insertion flag set) */;
+ u8 nbds /* Number of BDs that make up one packet */;
+ struct core_tx_bd_flags bd_flags /* BD Flags */;
+ __le16 l4_hdr_offset_w;
+};
+
+/*
+ * Light L2 TX Destination
+ */
+enum core_tx_dest {
+ CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
+ CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
+ MAX_CORE_TX_DEST
+};
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct core_tx_start_ramrod_data {
+ struct regpair pbl_base_addr /* Address of the pbl page */;
+ __le16 mtu /* Maximum transmission unit */;
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* Status block protocol index */;
+ u8 tx_dest /* TX Destination (either Network or LB) */;
+ u8 stats_en /* Statistics Enable */;
+ u8 stats_id /* Statistics Counter ID */;
+ __le16 pbl_size /* Number of BD pages pointed by PBL */;
+ __le16 qm_pq_id /* QM PQ ID */;
+ u8 conn_type /* connection type that loaded ll2 */;
+ u8 gsi_offload_flag
+ /* set when in GSI offload mode on ROCE connection */;
+	u8 reserved[2];
+};
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct core_tx_stop_ramrod_data {
+ __le32 reserved0[2];
+};
+
+struct eth_mstorm_per_queue_stat {
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct eth_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes
+ /* number of total bytes sent without errors */;
+ struct regpair sent_mcast_bytes
+ /* number of total bytes sent without errors */;
+ struct regpair sent_bcast_bytes
+ /* number of total bytes sent without errors */;
+ struct regpair sent_ucast_pkts
+ /* number of total packets sent without errors */;
+ struct regpair sent_mcast_pkts
+ /* number of total packets sent without errors */;
+ struct regpair sent_bcast_pkts
+ /* number of total packets sent without errors */;
+ struct regpair error_drop_pkts
+ /* number of total packets dropped due to errors */;
+};
+
+/*
+ * ETH RX rate limit configuration
+ */
+struct eth_rx_rate_limit {
+ __le16 mult;
+ __le16 cnst
+	/* Constant term to add to (or subtract from) the number of cycles */;
+ u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+struct eth_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+/*
+ * Event Ring Next Page Address
+ */
+struct event_ring_next_addr {
+ struct regpair addr /* Next Page Address */;
+ __le32 reserved[2] /* Reserved */;
+};
+
+/*
+ * Event Ring Element
+ */
+union event_ring_element {
+ struct event_ring_entry entry /* Event Ring Entry */;
+	struct event_ring_next_addr next_addr /* Event Ring Next Page Address */;
+};
+
+/*
+ * Ports mode
+ */
+enum fw_flow_ctrl_mode {
+ flow_ctrl_pause,
+ flow_ctrl_pfc,
+ MAX_FW_FLOW_CTRL_MODE
+};
+
+/*
+ * Integration Phase
+ */
+enum integ_phase {
+ INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
+ INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
+ INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
+ MAX_INTEG_PHASE
+};
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
+ VF_PF_CHANNEL_NOT_READY
+ /* Writing to VF/PF channel when it is not ready */,
+ VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
+ VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
+	ETH_PACKET_TOO_SMALL
+	/* TX packet is shorter than reported on BDs or than the minimal size */,
+	ETH_ILLEGAL_VLAN_MODE
+	/* TX packet marked for VLAN insertion when that is illegal */,
+	ETH_MTU_VIOLATION /* TX packet is greater than MTU */,
+	ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
+	ETH_VLAN_INSERT_AND_INBAND_VLAN /* VLAN can't be added to inband tag */,
+	ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */,
+	ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
+	ETH_INSUFFICIENT_BDS
+	/* There are not enough BDs for transmission of even one packet */,
+	ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
+	ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
+	ETH_ZERO_SIZE_BD
+	/* empty BD (which does not contain control flags) is illegal */,
+	ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
+	ETH_INSUFFICIENT_PAYLOAD,
+ ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
+ ETH_TUNN_IPV6_EXT_NBD_ERR
+ /* Tunneled packet with IPv6+Ext without a proper number of BDs */,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
+
+/*
+ * Mstorm non-triggering VF zone
+ */
+struct mstorm_non_trigger_vf_zone {
+ struct eth_mstorm_per_queue_stat eth_queue_stat
+ /* VF statistic bucket */;
+};
+
+/*
+ * Mstorm VF zone
+ */
+struct mstorm_vf_zone {
+ struct mstorm_non_trigger_vf_zone non_trigger
+ /* non-interrupt-triggering zone */;
+};
+
+/*
+ * personality per PF
+ */
+enum personality_type {
+ BAD_PERSONALITY_TYP,
+ PERSONALITY_ISCSI /* iSCSI and LL2 */,
+ PERSONALITY_FCOE /* Fcoe and LL2 */,
+ PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
+ PERSONALITY_RDMA /* Roce and LL2 */,
+ PERSONALITY_CORE /* CORE(LL2) */,
+ PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_TOE /* Toe and LL2 */,
+ MAX_PERSONALITY_TYPE
+};
+
+/*
+ * tunnel configuration
+ */
+struct pf_start_tunnel_config {
+ u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port. */;
+	u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port. */;
+	u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+	u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */;
+	u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */;
+ u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+ u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+ u8 tunnel_clss_l2geneve
+ /* Classification scheme for l2 GENEVE tunnel. */;
+ u8 tunnel_clss_ipgeneve
+ /* Classification scheme for ip GENEVE tunnel. */;
+ u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+ __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/*
+ * Ramrod data for PF start ramrod
+ */
+struct pf_start_ramrod_data {
+ struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
+ struct regpair consolid_q_pbl_addr
+ /* PBL address of consolidation queue */;
+	struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */;
+ __le16 event_ring_sb_id /* Status block ID */;
+	u8 base_vf_id;
+ u8 num_vfs /* Amount of vfs owned by PF */;
+ u8 event_ring_num_pages /* Number of PBL pages in event ring */;
+ u8 event_ring_sb_index /* Status block index */;
+ u8 path_id /* HW path ID (engine ID) */;
+ u8 warning_as_error /* In FW asserts, treat warning as error */;
+ u8 dont_log_ramrods
+ /* If not set - throw a warning for each ramrod (for debug) */;
+	u8 personality /* defines the personality type of the new PF */;
+ __le16 log_type_mask;
+ u8 mf_mode /* Multi function mode */;
+ u8 integ_phase /* Integration phase */;
+ u8 allow_npar_tx_switching;
+ u8 inner_to_outer_pri_map[8];
+	u8 pri_map_valid
+	/* If inner_to_outer_pri_map is initialized, set pri_map_valid */;
+ __le32 outer_tag;
+ u8 reserved0[4];
+};
+
+/*
+ * Data for port update ramrod
+ */
+struct protocol_dcb_data {
+ u8 dcb_enable_flag /* dcbEnable flag value */;
+ u8 dcb_priority /* dcbPri flag value */;
+ u8 dcb_tc /* dcb TC value */;
+ u8 reserved;
+};
+
+/*
+ * tunnel configuration
+ */
+struct pf_update_tunnel_config {
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 set_vxlan_udp_port_flg
+ /* Update VXLAN tunnel UDP destination port. */;
+ u8 set_geneve_udp_port_flg
+ /* Update GENEVE tunnel UDP destination port. */;
+ u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+	u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */;
+	u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */;
+ u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+ u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+ u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+ u8 tunnel_clss_l2geneve
+ /* Classification scheme for l2 GENEVE tunnel. */;
+ u8 tunnel_clss_ipgeneve
+ /* Classification scheme for ip GENEVE tunnel. */;
+ u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+ __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+ __le16 reserved[3];
+};
+
+/*
+ * Data for port update ramrod
+ */
+struct pf_update_ramrod_data {
+ u8 pf_id;
+ u8 update_eth_dcb_data_flag /* Update Eth DCB data indication */;
+ u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */;
+ u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */;
+ u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */;
+ u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */;
+ u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
+ u8 reserved;
+ struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
+ struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
+	struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */;
+ struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
+	struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */;
+ __le16 mf_vlan /* new outer vlan id value */;
+ __le16 reserved2;
+	struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */;
+};
+
+/*
+ * Ports mode
+ */
+enum ports_mode {
+ ENGX2_PORTX1 /* 2 engines x 1 port */,
+ ENGX2_PORTX2 /* 2 engines x 2 ports */,
+ ENGX1_PORTX1 /* 1 engine x 1 port */,
+ ENGX1_PORTX2 /* 1 engine x 2 ports */,
+ ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ MAX_PORTS_MODE
+};
+
+/*
+ * RDMA TX Stats
+ */
+struct rdma_sent_stats {
+ struct regpair sent_bytes /* number of total RDMA bytes sent */;
+ struct regpair sent_pkts /* number of total RDMA packets sent */;
+};
+
+/*
+ * Pstorm non-triggering VF zone
+ */
+struct pstorm_non_trigger_vf_zone {
+ struct eth_pstorm_per_queue_stat eth_queue_stat
+ /* VF statistic bucket */;
+ struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
+};
+
+/*
+ * Pstorm VF zone
+ */
+struct pstorm_vf_zone {
+ struct pstorm_non_trigger_vf_zone non_trigger
+ /* non-interrupt-triggering zone */;
+	struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
+};
+
+/*
+ * Ramrod Header of SPQE
+ */
+struct ramrod_header {
+ __le32 cid /* Slowpath Connection CID */;
+ u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+ u8 protocol_id /* Ramrod Protocol ID */;
+ __le16 echo /* Ramrod echo */;
+};
+
+/*
+ * RDMA RX Stats
+ */
+struct rdma_rcv_stats {
+ struct regpair rcv_bytes /* number of total RDMA bytes received */;
+ struct regpair rcv_pkts /* number of total RDMA packets received */;
+};
+
+/*
+ * Slowpath Element (SPQE)
+ */
+struct slow_path_element {
+ struct ramrod_header hdr /* Ramrod Header */;
+ struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
+};
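+
+/* Illustrative sketch (assumption, not from the imported header): an
+ * SPQE pairs the ramrod header with a host pointer to the ramrod
+ * payload; e.g., assuming the regpair {lo, hi} layout and hypothetical
+ * endianness helpers:
+ *
+ *   spqe->hdr.cid = cpu_to_le32(cid);
+ *   spqe->hdr.cmd_id = CORE_RAMROD_RX_QUEUE_START;
+ *   spqe->hdr.protocol_id = protocol_id;
+ *   spqe->data_ptr.hi = cpu_to_le32((u32)(ramrod_phys >> 32));
+ *   spqe->data_ptr.lo = cpu_to_le32((u32)ramrod_phys);
+ */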
+
+/*
+ * Tstorm non-triggering VF zone
+ */
+struct tstorm_non_trigger_vf_zone {
+ struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
+};
+
+struct tstorm_per_port_stat {
+ struct regpair trunc_error_discard
+ /* packet is dropped because it was truncated in NIG */;
+ struct regpair mac_error_discard
+ /* packet is dropped because of Ethernet FCS error */;
+ struct regpair mftag_filter_discard
+ /* packet is dropped because classification was unsuccessful */;
+ struct regpair eth_mac_filter_discard;
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+	struct regpair iscsi_irregular_pkt
+	/* packet is an iSCSI irregular packet */;
+	struct regpair fcoe_irregular_pkt
+	/* packet is an FCoE irregular packet */;
+	struct regpair roce_irregular_pkt
+	/* packet is a RoCE irregular packet */;
+	struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */;
+	struct regpair toe_irregular_pkt /* packet is a TOE irregular packet */;
+	struct regpair preroce_irregular_pkt
+	/* packet is a PREROCE irregular packet */;
+};
+
+/*
+ * Tstorm VF zone
+ */
+struct tstorm_vf_zone {
+ struct tstorm_non_trigger_vf_zone non_trigger
+ /* non-interrupt-triggering zone */;
+};
+
+/*
+ * Tunnel classification scheme
+ */
+enum tunnel_clss {
+	TUNNEL_CLSS_MAC_VLAN = 0
+	/* Use MAC & VLAN from first L2 header for vport classification. */,
+	TUNNEL_CLSS_MAC_VNI,
+	TUNNEL_CLSS_INNER_MAC_VLAN
+	/* Use MAC and VLAN from last L2 header for vport classification */,
+	TUNNEL_CLSS_INNER_MAC_VNI,
+ MAX_TUNNEL_CLSS
+};
+
+/*
+ * Ustorm non-triggering VF zone
+ */
+struct ustorm_non_trigger_vf_zone {
+ struct eth_ustorm_per_queue_stat eth_queue_stat
+ /* VF statistic bucket */;
+ struct regpair vf_pf_msg_addr /* VF-PF message address */;
+};
+
+/*
+ * Ustorm triggering VF zone
+ */
+struct ustorm_trigger_vf_zone {
+ u8 vf_pf_msg_valid /* VF-PF message valid flag */;
+ u8 reserved[7];
+};
+
+/*
+ * Ustorm VF zone
+ */
+struct ustorm_vf_zone {
+ struct ustorm_non_trigger_vf_zone non_trigger
+ /* non-interrupt-triggering zone */;
+ struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
+};
+
+/*
+ * VF-PF channel data
+ */
+struct vf_pf_channel_data {
+ __le32 ready;
+ u8 valid;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+/*
+ * Ramrod data for VF start ramrod
+ */
+struct vf_start_ramrod_data {
+ u8 vf_id /* VF ID */;
+ u8 enable_flr_ack;
+ __le16 opaque_fid /* VF opaque FID */;
+	u8 personality /* defines the personality type of the new VF */;
+ u8 reserved[3];
+};
+
+/*
+ * Ramrod data for VF stop ramrod
+ */
+struct vf_stop_ramrod_data {
+ u8 vf_id /* VF ID */;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
+/*
+ * Attentions status block
+ */
+struct atten_status_block {
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index /* status block running index */;
+ __le32 reserved1;
+};
+
+enum block_addr {
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x620000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
+ MAX_BLOCK_ADDR
+};
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_MISC_AEU,
+ BLOCK_BAR0_MAP,
+ MAX_BLOCK_ID
+};
+
+/*
+ * Igu cleanup bit values to distinguish between clean or producer consumer
+ */
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+/*
+ * DMAE command
+ */
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo
+ /* PCIe source address low in bytes or GRC source address in DW */;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length /* Length in DW */;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo /* PCIe completion address low or grc address */;
+ __le32 comp_addr_hi;
+ __le32 comp_val /* Value to write to completion address */;
+	__le32 crc32 /* crc32 result */;
+ __le32 crc_32_c /* crc32_c result */;
+ __le16 crc16 /* crc16 result */;
+ __le16 crc16_c /* crc16_c result */;
+ __le16 crc10 /* crc_t10 result */;
+ __le16 reserved;
+ __le16 xsum16 /* checksum16 result */;
+ __le16 xsum8 /* checksum8 result */;
+};
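+
+/* Illustrative sketch (assumption, not from the imported header): the
+ * opcode word is composed from the MASK/SHIFT pairs above; the concrete
+ * src_sel/dst_sel encodings are assumptions, not taken from this file:
+ *
+ *   cmd->opcode = cpu_to_le32(
+ *       (src_sel << DMAE_CMD_SRC_SHIFT) |
+ *       (dst_sel << DMAE_CMD_DST_SHIFT) |
+ *       (1 << DMAE_CMD_CRC_RESET_SHIFT) |
+ *       (port_id << DMAE_CMD_PORT_ID_SHIFT));
+ */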
+
+struct fw_ver_num {
+ u8 major /* Firmware major version number */;
+ u8 minor /* Firmware minor version number */;
+ u8 rev /* Firmware revision version number */;
+	u8 eng /* Firmware engineering version number (for bootleg versions) */;
+};
+
+struct fw_ver_info {
+ __le16 tools_ver /* Tools version number */;
+ u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+ u8 reserved1;
+ struct fw_ver_num num /* FW version number */;
+ __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
+ __le32 reserved2;
+};
+
+struct storm_ram_section {
+ __le16 offset
+ /* The offset of the section in the RAM (in 64 bit units) */;
+ __le16 size /* The size of the section (in 64 bit units) */;
+};
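+
+/* Illustrative sketch (assumption, not from the imported header):
+ * offset/size are in 64-bit units, so the byte offset of a section in
+ * Storm RAM would be computed as (le16_to_cpu hypothetical):
+ *
+ *   u32 byte_off = (u32)le16_to_cpu(sec->offset) * sizeof(u64);
+ */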
+
+struct fw_info {
+ struct fw_ver_info ver /* FW version information */;
+ struct storm_ram_section fw_asserts_section
+ /* The FW Asserts offset/size in Storm RAM */;
+ __le32 reserved;
+};
+
+struct fw_info_location {
+ __le32 grc_addr /* GRC address where the fw_info struct is located. */;
+	__le32 size
+	/* Size of the fw_info structure (that's located at the grc_addr). */;
+};
+
+/*
+ * IGU cleanup command
+ */
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
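+
+/* Illustrative sketch (assumption, not from the imported header):
+ * building a cleanup command word from the fields above
+ * (IGU_COMMAND_TYPE_SET comes from enum command_type_bit earlier):
+ *
+ *   cmd->sb_id_and_flags = cpu_to_le32(
+ *       (1 << IGU_CLEANUP_CLEANUP_SET_SHIFT) |
+ *       (cleanup_type << IGU_CLEANUP_CLEANUP_TYPE_SHIFT) |
+ *       (IGU_COMMAND_TYPE_SET << IGU_CLEANUP_COMMAND_TYPE_SHIFT));
+ */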
+
+/*
+ * IGU firmware driver command
+ */
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+/*
+ * IGU firmware driver command
+ */
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+/*
+ * IGU mapping line structure
+ */
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
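+
+/* Illustrative sketch (assumption, not from the imported header):
+ * decoding a mapping line with a GET_FIELD-style helper as in the
+ * earlier note (le32_to_cpu hypothetical):
+ *
+ *   u32 line = le32_to_cpu(p_line->igu_mapping_line_fields);
+ *   u8 valid = GET_FIELD(line, IGU_MAPPING_LINE_VALID);
+ *   u8 vector = GET_FIELD(line, IGU_MAPPING_LINE_VECTOR_NUMBER);
+ */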
+
+/*
+ * IGU MSIX line structure
+ */
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+enum init_modes {
+ MODE_BB_A0,
+ MODE_BB_B0,
+ MODE_K2,
+ MODE_ASIC,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_100G,
+ MODE_EAGLE_ENG1_WORKAROUND,
+ MAX_INIT_MODES
+};
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_VF,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+/*
+ * per encapsulation type enabling flags
+ */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+ TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+ TPH_ST_HINT_TARGET
+ /* Device Write and Host Read, or Host Write and Device Read */,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/*
+ * QM hardware structure of enable bypass credit mask
+ */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/*
+ * QM hardware structure of opportunistic credit mask
+ */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/*
+ * QM hardware structure of QM map memory
+ */
+struct qm_rf_pq_map {
+ __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
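+
+/* Illustrative sketch (assumption, not from the imported header): one PQ
+ * map entry packs validity, the rate limiter, the VP PQ and the VOQ
+ * into a single register value:
+ *
+ *   map.reg = cpu_to_le32(
+ *       (1 << QM_RF_PQ_MAP_PQ_VALID_SHIFT) |
+ *       (vp_pq_id << QM_RF_PQ_MAP_VP_PQ_ID_SHIFT) |
+ *       (voq << QM_RF_PQ_MAP_VOQ_SHIFT) |
+ *       (rl_valid << QM_RF_PQ_MAP_RL_VALID_SHIFT) |
+ *       (rl_id << QM_RF_PQ_MAP_RL_ID_SHIFT));
+ */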
+
+/*
+ * Completion params for aggregated interrupt completion
+ */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
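+
+/* Illustrative sketch (assumption, not from the imported header): the
+ * 16-bit sdm_agg_int_comp_params above appears sized to fit the
+ * COMP_PARAM field of the op_gen command; a hedged composition:
+ *
+ *   op.command = cpu_to_le32(
+ *       ((u32)le16_to_cpu(params.params) << SDM_OP_GEN_COMP_PARAM_SHIFT) |
+ *       (comp_type << SDM_OP_GEN_COMP_TYPE_SHIFT));
+ */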
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+#endif /* __ECORE_HSI_COMMON__ */
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
new file mode 100644
index 00000000..80f4165f
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_ETH__
+#define __ECORE_HSI_ETH__
+/************************************************************************/
+/* Include the common eth target header for both eCore and the driver  */
+/************************************************************************/
+#include "eth_common.h"
+
+/*
+ * The eth storm context for the Tstorm
+ */
+struct tstorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/*
+ * The eth storm context for the Pstorm
+ */
+struct pstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/*
+ * The eth storm context for the Xstorm
+ */
+struct xstorm_eth_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* cf_array0 */;
+ __le32 reg6 /* cf_array1 */;
+ __le16 word7 /* word7 */;
+ __le16 word8 /* word8 */;
+ __le16 word9 /* word9 */;
+ __le16 word10 /* word10 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ __le32 reg9 /* reg9 */;
+ u8 byte7 /* byte7 */;
+ u8 byte8 /* byte8 */;
+ u8 byte9 /* byte9 */;
+ u8 byte10 /* byte10 */;
+ u8 byte11 /* byte11 */;
+ u8 byte12 /* byte12 */;
+ u8 byte13 /* byte13 */;
+ u8 byte14 /* byte14 */;
+ u8 byte15 /* byte15 */;
+ u8 byte16 /* byte16 */;
+ __le16 word11 /* word11 */;
+ __le32 reg10 /* reg10 */;
+ __le32 reg11 /* reg11 */;
+ __le32 reg12 /* reg12 */;
+ __le32 reg13 /* reg13 */;
+ __le32 reg14 /* reg14 */;
+ __le32 reg15 /* reg15 */;
+ __le32 reg16 /* reg16 */;
+ __le32 reg17 /* reg17 */;
+ __le32 reg18 /* reg18 */;
+ __le32 reg19 /* reg19 */;
+ __le16 word12 /* word12 */;
+ __le16 word13 /* word13 */;
+ __le16 word14 /* word14 */;
+ __le16 word15 /* word15 */;
+};
+
+/*
+ * The eth storm context for the Ystorm
+ */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+struct ystorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 terminate_spqe /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 tx_bd_cons_upd /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
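+
+/*
+ * Usage sketch (illustrative only, not part of the firmware interface):
+ * every flagsN byte in these aggregative contexts packs sub-fields that
+ * are described by _MASK/_SHIFT pairs. Reading and writing a field, here
+ * the 2-bit CF0 of flags0, looks like this (hypothetical helper names):
+ */
+static inline u8 tstorm_eth_get_cf0(const struct tstorm_eth_conn_ag_ctx *ctx)
+{
+ /* shift the field down, then mask off the neighboring fields */
+ return (ctx->flags0 >> TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT) &
+     TSTORM_ETH_CONN_AG_CTX_CF0_MASK;
+}
+
+static inline void tstorm_eth_set_cf0(struct tstorm_eth_conn_ag_ctx *ctx,
+				       u8 val)
+{
+ /* clear the old CF0 bits, then OR in the new value */
+ ctx->flags0 &= ~(TSTORM_ETH_CONN_AG_CTX_CF0_MASK <<
+		  TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT);
+ ctx->flags0 |= (val & TSTORM_ETH_CONN_AG_CTX_CF0_MASK) <<
+     TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT;
+}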
+
+struct ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 tx_int_coallecing_timeset /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+/*
+ * The eth storm context for the Ustorm
+ */
+struct ustorm_eth_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/*
+ * The eth storm context for the Mstorm
+ */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/*
+ * eth connection context
+ */
+struct eth_conn_context {
+ struct tstorm_eth_conn_st_ctx tstorm_st_context
+ /* tstorm storm context */;
+ struct regpair tstorm_st_padding[2] /* padding */;
+ struct pstorm_eth_conn_st_ctx pstorm_st_context
+ /* pstorm storm context */;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context
+ /* xstorm storm context */;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context
+ /* xstorm aggregative context */;
+ struct ystorm_eth_conn_st_ctx ystorm_st_context
+ /* ystorm storm context */;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context
+ /* ystorm aggregative context */;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context
+ /* tstorm aggregative context */;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context
+ /* ustorm aggregative context */;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context
+ /* ustorm storm context */;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context
+ /* mstorm storm context */;
+};
+
+/*
+ * Error codes returned by Ethernet filter commands
+ */
+enum eth_error_code {
+ ETH_OK = 0x00 /* command succeeded */,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL
+ /* mac add filters command failed due to cam full state */,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2
+ /* mac add filters command failed due to mtt2 full state */,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2
+ /* mac add filters command failed due to duplicate mac address */,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2
+ /* mac add filters command failed due to duplicate mac address */,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF
+ /* mac delete filters command failed due to not found state */,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2
+ /* mac delete filters command failed due to not found state */,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2
+ /* mac delete filters command failed due to not found state */,
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC
+ /* mac add filters command failed due to MAC Address of
+ * 00:00:00:00:00:00
+ */
+ ,
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL
+ /* vlan add filters command failed due to cam full state */,
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP
+ /* vlan add filters command failed due to duplicate VLAN filter */,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF
+ /* vlan delete filters command failed due to not found state */,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1
+ /* vlan delete filters command failed due to not found state */,
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP
+ /* pair add filters command failed due to duplicate request */,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL
+ /* pair add filters command failed due to full state */,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC
+ /* pair add filters command failed due to full state */,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF
+ /* pair delete filters command failed due to not found state */,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1
+ /* pair delete filters command failed due to not found state */,
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC
+ /* pair add filters command failed due to MAC Address of
+ * 00:00:00:00:00:00
+ */
+ ,
+ ETH_FILTERS_VNI_ADD_FAIL_FULL
+ /* vni add filters command failed due to cam full state */,
+ ETH_FILTERS_VNI_ADD_FAIL_DUP
+ /* vni add filters command failed due to duplicate VNI filter */,
+ MAX_ETH_ERROR_CODE
+};
+
+/*
+ * opcodes for the event ring
+ */
+enum eth_event_opcode {
+ ETH_EVENT_UNUSED,
+ ETH_EVENT_VPORT_START,
+ ETH_EVENT_VPORT_UPDATE,
+ ETH_EVENT_VPORT_STOP,
+ ETH_EVENT_TX_QUEUE_START,
+ ETH_EVENT_TX_QUEUE_STOP,
+ ETH_EVENT_RX_QUEUE_START,
+ ETH_EVENT_RX_QUEUE_UPDATE,
+ ETH_EVENT_RX_QUEUE_STOP,
+ ETH_EVENT_FILTERS_UPDATE,
+ ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+ ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+ ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
+ ETH_EVENT_RX_ADD_UDP_FILTER,
+ ETH_EVENT_RX_DELETE_UDP_FILTER,
+ ETH_EVENT_RX_ADD_GFT_FILTER,
+ ETH_EVENT_RX_DELETE_GFT_FILTER,
+ ETH_EVENT_RX_CREATE_GFT_ACTION,
+ MAX_ETH_EVENT_OPCODE
+};
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum eth_filter_action {
+ ETH_FILTER_ACTION_UNUSED,
+ ETH_FILTER_ACTION_REMOVE,
+ ETH_FILTER_ACTION_ADD,
+ ETH_FILTER_ACTION_REMOVE_ALL
+ /* Remove all filters of given type and vport ID. */,
+ MAX_ETH_FILTER_ACTION
+};
+
+/*
+ * Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd {
+ u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+ u8 vport_id /* the vport id */;
+ u8 action /* filter command action: add/remove/replace */;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
+
+/*
+ * $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd_header {
+ u8 rx /* If set, apply these commands to the RX path */;
+ u8 tx /* If set, apply these commands to the TX path */;
+ u8 cmd_cnt /* Number of filter commands */;
+ u8 assert_on_error;
+ u8 reserved1[4];
+};
+
+/*
+ * Ethernet filter types: mac/vlan/pair
+ */
+enum eth_filter_type {
+ ETH_FILTER_TYPE_UNUSED,
+ ETH_FILTER_TYPE_MAC /* Add/remove a MAC address */,
+ ETH_FILTER_TYPE_VLAN /* Add/remove a VLAN */,
+ ETH_FILTER_TYPE_PAIR /* Add/remove a MAC-VLAN pair */,
+ ETH_FILTER_TYPE_INNER_MAC /* Add/remove an inner MAC address */,
+ ETH_FILTER_TYPE_INNER_VLAN /* Add/remove an inner VLAN */,
+ ETH_FILTER_TYPE_INNER_PAIR /* Add/remove an inner MAC-VLAN pair */,
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR /* Add/remove an inner MAC-VNI pair */,
+ ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */,
+ ETH_FILTER_TYPE_VNI /* Add/remove a VNI */,
+ MAX_ETH_FILTER_TYPE
+};
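+
+/*
+ * Usage sketch (illustrative only, not part of the firmware interface):
+ * a filter command header is followed by cmd_cnt filter commands. The
+ * hypothetical helper below fills one "add MAC on the RX path" command;
+ * it assumes zeroed structures and a little-endian host (on big-endian
+ * hosts the 16-bit values would need a cpu-to-le16 conversion).
+ */
+static inline void fill_add_mac_cmd(struct eth_filter_cmd_header *hdr,
+				     struct eth_filter_cmd *cmd,
+				     u8 vport_id, const u8 mac[6])
+{
+ hdr->rx = 1;		/* apply on the RX path */
+ hdr->cmd_cnt = 1;	/* a single command follows the header */
+
+ cmd->type = ETH_FILTER_TYPE_MAC;
+ cmd->vport_id = vport_id;
+ cmd->action = ETH_FILTER_ACTION_ADD;
+ /* the six MAC bytes are carried in three 16-bit words, MSBs first
+  * (one common packing; confirm against the ecore helpers before use)
+  */
+ cmd->mac_msb = (__le16)((mac[0] << 8) | mac[1]);
+ cmd->mac_mid = (__le16)((mac[2] << 8) | mac[3]);
+ cmd->mac_lsb = (__le16)((mac[4] << 8) | mac[5]);
+}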
+
+/*
+ * eth IPv4 Fragment Type
+ */
+enum eth_ipv4_frag_type {
+ ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */,
+ ETH_IPV4_FIRST_FRAG
+ /* First Fragment of IPv4 Packet (contains headers) */,
+ ETH_IPV4_NON_FIRST_FRAG
+ /* Non-First Fragment of IPv4 Packet (does not contain headers) */,
+ MAX_ETH_IPV4_FRAG_TYPE
+};
+
+/*
+ * eth IP type (IPv4/IPv6)
+ */
+enum eth_ip_type {
+ ETH_IPV4 /* IPv4 */,
+ ETH_IPV6 /* IPv6 */,
+ MAX_ETH_IP_TYPE
+};
+
+/*
+ * Ethernet Ramrod Command IDs
+ */
+enum eth_ramrod_cmd_id {
+ ETH_RAMROD_UNUSED,
+ ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+ ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+ ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+ ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+ ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+ ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+ ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+ ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION
+ /* RX - Create an Openflow Action */,
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER
+ /* RX - Add an Openflow Filter to the Searcher */,
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER
+ /* RX - Delete an Openflow Filter from the Searcher */,
+ ETH_RAMROD_RX_ADD_UDP_FILTER /* RX - Add a UDP Filter to the Searcher */
+ ,
+ ETH_RAMROD_RX_DELETE_UDP_FILTER
+ /* RX - Delete a UDP Filter from the Searcher */,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a GFT Action */,
+ ETH_RAMROD_RX_DELETE_GFT_FILTER
+ /* RX - Delete a GFT Filter from the Searcher */,
+ ETH_RAMROD_RX_ADD_GFT_FILTER
+ /* RX - Add a GFT Filter to the Searcher */,
+ MAX_ETH_RAMROD_CMD_ID
+};
+
+/*
+ * return code from eth sp ramrods
+ */
+struct eth_return_code {
+ u8 value;
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
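+
+/*
+ * Usage sketch (illustrative only, not part of the firmware interface):
+ * unpacking the packed return code. value is a plain u8, so no
+ * endianness handling is needed; the error field presumably carries one
+ * of enum eth_error_code. Helper names are hypothetical.
+ */
+static inline u8 eth_return_code_err(const struct eth_return_code *rc)
+{
+ return (rc->value >> ETH_RETURN_CODE_ERR_CODE_SHIFT) &
+     ETH_RETURN_CODE_ERR_CODE_MASK;
+}
+
+static inline u8 eth_return_code_rx_tx(const struct eth_return_code *rc)
+{
+ /* direction indication bit (RX vs. TX) */
+ return (rc->value >> ETH_RETURN_CODE_RX_TX_SHIFT) &
+     ETH_RETURN_CODE_RX_TX_MASK;
+}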
+
+/*
+ * What to do in case an error occurs
+ */
+enum eth_tx_err {
+ ETH_TX_ERR_DROP /* Drop erroneous packet. */,
+ ETH_TX_ERR_ASSERT_MALICIOUS
+ /* Assert an interrupt for PF, declare as malicious for VF */,
+ MAX_ETH_TX_ERR
+};
+
+/*
+ * Array of the different error type behaviors
+ */
+struct eth_tx_err_vals {
+ __le16 values;
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
+/*
+ * vport rss configuration data
+ */
+struct eth_vport_rss_config {
+ __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+ u8 rss_id;
+ u8 rss_mode /* The RSS mode for this function */;
+ u8 update_rss_key /* if set update the rss key */;
+ u8 update_rss_ind_table /* if set update the indirection table */;
+ u8 update_rss_capabilities /* if set update the capabilities */;
+ u8 tbl_size /* rss mask (Tbl size) */;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]
+ /* RSS indirection table */;
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS] /* RSS key supplied to us by OS */
+ ;
+ __le32 reserved3[2];
+};
+
+/*
+ * eth vport RSS mode
+ */
+enum eth_vport_rss_mode {
+ ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */,
+ ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */,
+ MAX_ETH_VPORT_RSS_MODE
+};
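+
+/*
+ * Usage sketch (illustrative only, not part of the firmware interface):
+ * a hypothetical helper enabling 4-tuple RSS hashing for TCP over
+ * IPv4/IPv6 and spreading num_queues (>= 1) RX queues round-robin over
+ * the indirection table. Assumes a zeroed config and little-endian host.
+ */
+static inline void rss_config_fill(struct eth_vport_rss_config *cfg,
+				    u16 num_queues)
+{
+ u16 caps = 0, i;
+
+ caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT;
+ caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT;
+ caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT;
+ caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT;
+ cfg->capabilities = (__le16)caps;
+
+ cfg->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
+ cfg->update_rss_capabilities = 1;
+ cfg->update_rss_ind_table = 1;
+ for (i = 0; i < ETH_RSS_IND_TABLE_ENTRIES_NUM; i++)
+	cfg->indirection_table[i] = (__le16)(i % num_queues);
+}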
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_rx_mode {
+ __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+ __le16 reserved2[3];
+};
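+
+/*
+ * Usage sketch (illustrative only, not part of the firmware interface):
+ * a promiscuous-style RX classification state accepts all unicast,
+ * multicast and broadcast frames. Assumes a little-endian host.
+ */
+static inline void rx_mode_set_promisc(struct eth_vport_rx_mode *mode)
+{
+ u16 state = 0;
+
+ state |= 1 << ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT;
+ state |= 1 << ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT;
+ state |= 1 << ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT;
+ state |= 1 << ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT;
+ mode->state = (__le16)state;
+}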
+
+/*
+ * Command for setting tpa parameters
+ */
+struct eth_vport_tpa_param {
+ u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */;
+ u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
+ u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
+ u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg
+ /* If set, put header of first TPA segment on bd and data on SGE */
+ ;
+ u8 tpa_gro_consistent_flg
+ /* If set, GRO data consistency will be checked for TPA continuation */;
+ u8 tpa_max_aggs_num
+ /* maximum number of opened aggregations per v-port */;
+ __le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
+ __le16 tpa_min_size_to_start
+ /* minimum TCP payload size for a packet to start aggregation */;
+ __le16 tpa_min_size_to_cont
+ /* minimum TCP payload size for a packet to continue aggregation */
+ ;
+ u8 max_buff_num
+ /* maximal number of buffers that can be used for one aggregation */
+ ;
+ u8 reserved;
+};
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_tx_mode {
+ __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+ __le16 reserved2[3];
+};
+
+/*
+ * Ramrod data for rx add gft filter data
+ */
+struct rx_add_gft_filter_data {
+ struct regpair pkt_hdr_addr /* Packet Header That Defines GFT Filter */
+ ;
+ __le16 action_icid /* ICID of Action to run for this filter */;
+ __le16 pkt_hdr_length /* Packet Header Length */;
+ u8 reserved[4];
+};
+
+/*
+ * Ramrod data for rx add openflow filter
+ */
+struct rx_add_openflow_filter_data {
+ __le16 action_icid /* CID of Action to run for this filter */;
+ u8 priority /* Searcher String - Packet priority */;
+ u8 reserved0;
+ __le32 tenant_id /* Searcher String - Tenant ID */;
+ __le16 dst_mac_hi /* Searcher String - Destination Mac Bytes 0 to 1 */;
+ __le16 dst_mac_mid /* Searcher String - Destination Mac Bytes 2 to 3 */
+ ;
+ __le16 dst_mac_lo /* Searcher String - Destination Mac Bytes 4 to 5 */;
+ __le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */;
+ __le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */;
+ __le16 src_mac_lo /* Searcher String - Source Mac 4 to 5 */;
+ __le16 vlan_id /* Searcher String - Vlan ID */;
+ __le16 l2_eth_type /* Searcher String - Last L2 Ethertype */;
+ u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */;
+ u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type */;
+ u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */;
+ u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+ __le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */;
+ __le32 ipv4_src_addr /* Searcher String - IPv4 Source Address */;
+ __le16 l4_dst_port /* Searcher String - TCP/UDP Destination Port */;
+ __le16 l4_src_port /* Searcher String - TCP/UDP Source Port */;
+};
+
+/*
+ * Ramrod data for rx create gft action
+ */
+struct rx_create_gft_action_data {
+ u8 vport_id /* Vport Id of GFT Action */;
+ u8 reserved[7];
+};
+
+/*
+ * Ramrod data for rx create openflow action
+ */
+struct rx_create_openflow_action_data {
+ u8 vport_id /* ID of virtual port */;
+ u8 reserved[7];
+};
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct rx_queue_start_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+ __le16 bd_max_bytes /* maximal bytes that can be placed on the bd */;
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* Status block protocol index */;
+ u8 vport_id /* ID of virtual port */;
+ u8 default_rss_queue_flg /* set queue as default rss queue if set */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 stats_counter_id /* Statistics counter ID */;
+ u8 pin_context /* Pin context in CCFC to improve performance */;
+ u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD/SGE fetch */;
+ u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet placement */
+ ;
+ u8 pxp_st_hint
+ /* PXP command Steering tag hint. Use enum pxp_tph_st_hint */;
+ __le16 pxp_st_index /* PXP command Steering tag index */;
+ u8 pmd_mode
+ /* Indicates that current queue belongs to poll-mode driver */;
+ u8 notify_en;
+ u8 toggle_val
+ /* Initial value for the toggle valid bit - used in PMD mode */;
+ u8 reserved[7];
+ __le16 reserved1 /* FW reserved. */;
+ struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+ struct regpair bd_base /* bd address of the first bd page */;
+ struct regpair reserved2 /* FW reserved. */;
+};
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct rx_queue_stop_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 vport_id /* ID of virtual port */;
+ u8 reserved[3];
+};
+
+/*
+ * Ramrod data for rx queue update ramrod
+ */
+struct rx_queue_update_ramrod_data {
+ __le16 rx_queue_id /* ID of RX queue */;
+ u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+ u8 complete_event_flg /* post completion to the event ring if set */;
+ u8 vport_id /* ID of virtual port */;
+ u8 reserved[4];
+ u8 reserved1 /* FW reserved. */;
+ u8 reserved2 /* FW reserved. */;
+ u8 reserved3 /* FW reserved. */;
+ __le16 reserved4 /* FW reserved. */;
+ __le16 reserved5 /* FW reserved. */;
+ struct regpair reserved6 /* FW reserved. */;
+};
+
+/*
+ * Ramrod data for rx Add UDP Filter
+ */
+struct rx_udp_filter_data {
+ __le16 action_icid /* CID of Action to run for this filter */;
+ __le16 vlan_id /* Searcher String - Vlan ID */;
+ u8 ip_type /* Searcher String - IP Type */;
+ u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+ __le16 reserved1;
+ __le32 ip_dst_addr[4]
+ /* Searcher String-IP Dest Addr, for IPv4 use ip_dst_addr[0] only */
+ ;
+ __le32 ip_src_addr[4]
+ /* Searcher String-IP Src Addr, for IPv4 use ip_src_addr[0] only */
+ ;
+ __le16 udp_dst_port /* Searcher String - UDP Destination Port */;
+ __le16 udp_src_port /* Searcher String - UDP Source Port */;
+ __le32 tenant_id /* Searcher String - Tenant ID */;
+};
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id /* Status block ID */;
+ u8 sb_index /* Status block protocol index */;
+ u8 vport_id /* VPort ID */;
+ u8 reserved0 /* FW reserved. */;
+ u8 stats_counter_id /* Statistics counter ID to use */;
+ __le16 qm_pq_id /* QM PQ ID */;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint /* PXP command Steering tag hint */;
+ u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */;
+ u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */;
+ __le16 pxp_st_index /* PXP command Steering tag index */;
+ __le16 comp_agg_size /* TX completion min agg size - for PMD queues */;
+ __le16 queue_zone_id /* queue zone ID to use */;
+ __le16 test_dup_count /* In Test Mode, number of duplications */;
+ __le16 pbl_size /* Number of BD pages pointed by PBL */;
+ __le16 tx_queue_id
+ /* unique Queue ID - currently used only by PMD flow */;
+ struct regpair pbl_base_addr /* address of the pbl page */;
+ struct regpair bd_cons_address
+ /* BD consumer address in host - for PMD queues */;
+};
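+
+/*
+ * Usage sketch (illustrative only, not part of the firmware interface):
+ * flags is a plain u8, so a bit such as PMD_MODE (queue driven by a
+ * poll-mode driver) is set directly with its _MASK/_SHIFT pair.
+ */
+static inline void tx_queue_set_pmd_mode(struct tx_queue_start_ramrod_data *p)
+{
+ p->flags |= TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK <<
+     TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT;
+}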
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct tx_queue_stop_ramrod_data {
+ __le16 reserved[4];
+};
+
+/*
+ * Ramrod data for vport filter update ramrod
+ */
+struct vport_filter_update_ramrod_data {
+ struct eth_filter_cmd_header filter_cmd_hdr
+ /* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT]
+ /* Filter Commands */;
+};
+
+/*
+ * Ramrod data for vport start ramrod
+ */
+struct vport_start_ramrod_data {
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en /* if set, drop packet with ttl=0 */;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode /* Rx filter data */;
+ struct eth_vport_tx_mode tx_mode /* Tx filter data */;
+ struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */
+ ;
+ __le16 default_vlan /* Default Vlan value to be forced by FW */;
+ u8 tx_switching_en /* Tx switching is enabled for current Vport */;
+ u8 anti_spoofing_en
+ /* Anti-spoofing verification is set for current Vport */;
+ u8 default_vlan_en
+ /* If set, the default Vlan value is forced by the FW */;
+ u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */
+ ;
+ u8 silent_vlan_removal_en
+ /* If enabled, the inner VLAN is stripped and not written to the CQE */
+ ;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav
+ /* Desired behavior per TX error type */;
+ u8 zero_placement_offset;
+ u8 reserved[7];
+};
+
+/*
+ * Ramrod data for vport stop ramrod
+ */
+struct vport_stop_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data_cmn {
+ u8 vport_id;
+ u8 update_rx_active_flg /* set if rx active flag should be handled */;
+ u8 rx_active_flg /* rx active flag value */;
+ u8 update_tx_active_flg /* set if tx active flag should be handled */;
+ u8 tx_active_flg /* tx active flag value */;
+ u8 update_rx_mode_flg /* set if rx state data should be handled */;
+ u8 update_tx_mode_flg /* set if tx state data should be handled */;
+ u8 update_approx_mcast_flg
+ /* set if approx. mcast data should be handled */;
+ u8 update_rss_flg /* set if rss data should be handled */;
+ u8 update_inner_vlan_removal_en_flg
+ /* set if inner_vlan_removal_en should be handled */;
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg /* set if tpa enable changes */;
+ u8 update_tx_switching_en_flg
+ /* set if tx switching en flag should be handled */;
+ u8 tx_switching_en /* tx switching en value */;
+ u8 update_anti_spoofing_en_flg
+ /* set if anti spoofing flag should be handled */;
+ u8 anti_spoofing_en /* Anti-spoofing verification en value */;
+ u8 update_handle_ptp_pkts
+ /* set if handle_ptp_pkts should be handled. */;
+ u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */
+ ;
+ u8 update_default_vlan_en_flg
+ /* If set, the default Vlan enable flag is updated */;
+ u8 default_vlan_en
+ /* If set, the default Vlan value is forced by the FW */;
+ u8 update_default_vlan_flg
+ /* If set, the default Vlan value is updated */;
+ __le16 default_vlan /* Default Vlan value to be forced by FW */;
+ u8 update_accept_any_vlan_flg
+ /* set if accept_any_vlan should be handled */;
+ u8 accept_any_vlan /* accept_any_vlan updated value */;
+ u8 silent_vlan_removal_en;
+ u8 update_mtu_flg
+ /* If set, MTU will be updated. Vport must not be active. */;
+ __le16 mtu /* New MTU value. Used if update_mtu_flg is set */;
+ u8 reserved[2];
+};
+
+struct vport_update_ramrod_mcast {
+ __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] /* multicast bins */;
+};
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data {
+ struct vport_update_ramrod_data_cmn common
+ /* Common data for all vport update ramrods */;
+ struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
+ struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
+ struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */
+ ;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config /* rss config data */;
+};
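+
+/*
+ * Usage sketch (illustrative only, not part of the firmware interface):
+ * the common section pairs each property with an update_*_flg, so a
+ * single ramrod can change one property and leave the rest untouched:
+ */
+static inline void vport_update_set_tx_switching(
+	struct vport_update_ramrod_data *data, u8 enable)
+{
+ data->common.update_tx_switching_en_flg = 1;
+ data->common.tx_switching_en = enable;
+}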
+
+/*
+ * GFT CAM line struct
+ */
+struct gft_cam_line {
+ __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+/*
+ * GFT CAM line struct (for driversim use)
+ */
+struct gft_cam_line_mapped {
+ __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+/*
+ * Used in gft_profile_key: Indication for ip version
+ */
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+/*
+ * Profile key struct for GFT logic in Prs
+ */
+struct gft_profile_key {
+ __le16 profile_key;
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+/*
+ * Used in gft_profile_key: Indication for tunnel type
+ */
+enum gft_profile_tunnel_type {
+ GFT_PROFILE_NO_TUNNEL = 0,
+ GFT_PROFILE_VXLAN_TUNNEL = 1,
+ GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+ GFT_PROFILE_GRE_IP_TUNNEL = 3,
+ GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+ GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+ MAX_GFT_PROFILE_TUNNEL_TYPE
+};
+
+/*
+ * Used in gft_profile_key: Indication for protocol type
+ */
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
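+
+/*
+ * Usage sketch (illustrative only, not part of the firmware interface,
+ * field semantics inferred from the names): composing a mapped CAM line
+ * that matches untunneled IPv4/TCP for one PF. The *_MASK_* bits appear
+ * to select which sub-fields the lookup compares. Assumes a
+ * little-endian host.
+ */
+static inline void gft_cam_line_ipv4_tcp(struct gft_cam_line_mapped *line,
+					  u8 pf_id)
+{
+ u32 cam = 0;
+
+ cam |= 1 << GFT_CAM_LINE_MAPPED_VALID_SHIFT;
+ cam |= GFT_PROFILE_IPV4 << GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT;
+ cam |= GFT_PROFILE_TCP_PROTOCOL <<
+     GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT;
+ cam |= GFT_PROFILE_NO_TUNNEL << GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT;
+ cam |= (pf_id & GFT_CAM_LINE_MAPPED_PF_ID_MASK) <<
+     GFT_CAM_LINE_MAPPED_PF_ID_SHIFT;
+ /* compare IP version, upper protocol, tunnel type and PF ID */
+ cam |= 1 << GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT;
+ cam |= 0xF << GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT;
+ cam |= 0xF << GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT;
+ cam |= (u32)0xF << GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT;
+ line->camline = (__le32)cam;
+}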
+
+/*
+ * GFT RAM line struct
+ */
+struct gft_ram_line {
+ __le32 low32bits;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 high32bits;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+/*
+ * Used in the first 2 bits of gft_ram_line: indication for VLAN select
+ */
+enum gft_vlan_select {
+ INNER_PROVIDER_VLAN = 0,
+ INNER_VLAN = 1,
+ OUTER_PROVIDER_VLAN = 2,
+ OUTER_VLAN = 3,
+ MAX_GFT_VLAN_SELECT
+};
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+/* @DPDK: xstormEthConnAgCtxDqExtLdPart */
+struct xstorm_eth_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 word1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 go_to_bd_cons /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+#endif /* __ECORE_HSI_ETH__ */
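
The MASK/SHIFT pairs in the context structures above all follow one packing convention: a field occupies MASK bits starting at bit SHIFT within its flags byte. A minimal sketch of that convention, assuming helpers equivalent to ecore's SET_FIELD/GET_FIELD (the helper names below are illustrative, not part of this patch):

    #include <stdint.h>

    /* Illustrative accessors for the <NAME>_MASK / <NAME>_SHIFT convention. */
    #define HSI_GET(var, field) \
            (((var) >> field##_SHIFT) & field##_MASK)
    #define HSI_SET(var, field, val) \
            ((var) = ((var) & ~(field##_MASK << field##_SHIFT)) | \
                     (((val) & field##_MASK) << field##_SHIFT))

    /* Example: enable CF2 (bit 0 of flags8) in an xstorm ETH HW context. */
    static inline void set_cf2en(uint8_t *flags8)
    {
            HSI_SET(*flags8, XSTORM_ETH_HW_CONN_AG_CTX_CF2EN, 1);
    }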
diff --git a/drivers/net/qede/base/ecore_hsi_tools.h b/drivers/net/qede/base/ecore_hsi_tools.h
new file mode 100644
index 00000000..18eea762
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hsi_tools.h
@@ -0,0 +1,1081 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_TOOLS__
+#define __ECORE_HSI_TOOLS__
+/**********************************/
+/* Tools HSI constants and macros */
+/**********************************/
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* init pattern size in bytes */
+#define INIT_PATTERN_SIZE_BITS 4
+#define MAX_INIT_PATTERN_SIZE (1 << INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN 19
+#define PXP_WIN_DWORD_SIZE_BITS 10
+#define PXP_WIN_DWORD_SIZE (1 << PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS 8
+#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS 18
+#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS 6
+#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS 8
+#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM 3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS 3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS 0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT 0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
+
+/* max number of register values following the idle check header for LSI */
+#define IDLE_CHK_MAX_LSI_DUMP_REGS 2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR 0
+#define IDLE_CHK_QM_RD_WR_BANK 1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* the MCP Trace meta data signature is duplicated in the
+ * perl script that generates the NVRAM images
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
+/* Maximal number of RAM lines occupied by FW Asserts data */
+#define MAX_FW_ASSERTS_RAM_LINES 800
+
+/*
+ * Binary buffer header
+ */
+struct bin_buffer_hdr {
+ __le32 offset /* buffer offset in bytes from the beginning of the binary file */;
+ __le32 length /* buffer length in bytes */;
+};
+
+/*
+ * binary buffer types
+ */
+enum bin_buffer_type {
+ BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
+ BIN_BUF_INIT_CMD /* init commands */,
+ BIN_BUF_INIT_VAL /* init data */,
+ BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+ BIN_BUF_IRO /* internal RAM offsets array */,
+ MAX_BIN_BUFFER_TYPE
+};
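
A likely use of this enum, judging from the offset/length pair in bin_buffer_hdr, is as an index into a table of headers at the start of the firmware binary. The sketch below rests on that assumption (and on a little-endian host); it is not the driver's actual loader:

    #include <stdint.h>

    /* Hypothetical lookup of one buffer inside the firmware blob,
     * assuming the file begins with MAX_BIN_BUFFER_TYPE headers.
     */
    static const uint8_t *find_bin_buf(const uint8_t *file,
                                       enum bin_buffer_type type,
                                       uint32_t *len_out)
    {
            const struct bin_buffer_hdr *hdrs =
                    (const struct bin_buffer_hdr *)file;

            *len_out = hdrs[type].length;   /* __le32; LE host assumed */
            return file + hdrs[type].offset;
    }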
+
+/*
+ * Chip IDs
+ */
+enum chip_ids {
+ CHIP_BB_A0 /* BB A0 chip ID */,
+ CHIP_BB_B0 /* BB B0 chip ID */,
+ CHIP_K2 /* AH chip ID */,
+ MAX_CHIP_IDS
+};
+
+/*
+ * memory dump descriptor
+ */
+struct dbg_dump_mem_desc {
+ __le32 dword0;
+#define DBG_DUMP_MEM_DESC_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_DESC_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_DESC_ASIC_CHIP_MASK_MASK 0xF
+#define DBG_DUMP_MEM_DESC_ASIC_CHIP_MASK_SHIFT 24
+#define DBG_DUMP_MEM_DESC_SIM_CHIP_MASK_MASK 0xF
+#define DBG_DUMP_MEM_DESC_SIM_CHIP_MASK_SHIFT 28
+ __le32 dword1;
+#define DBG_DUMP_MEM_DESC_LENGTH_MASK 0x3FFFF
+#define DBG_DUMP_MEM_DESC_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_DESC_REG_TYPE_ID_MASK 0x3F
+#define DBG_DUMP_MEM_DESC_REG_TYPE_ID_SHIFT 18
+#define DBG_DUMP_MEM_DESC_BLOCK_ID_MASK 0xFF
+#define DBG_DUMP_MEM_DESC_BLOCK_ID_SHIFT 24
+};
+
+/*
+ * registers dump descriptor: chip
+ */
+struct dbg_dump_regs_chip_desc {
+ __le32 data;
+#define DBG_DUMP_REGS_CHIP_DESC_IS_CHIP_MASK_MASK 0x1
+#define DBG_DUMP_REGS_CHIP_DESC_IS_CHIP_MASK_SHIFT 0
+#define DBG_DUMP_REGS_CHIP_DESC_ASIC_CHIP_MASK_MASK 0x7FFFFF
+#define DBG_DUMP_REGS_CHIP_DESC_ASIC_CHIP_MASK_SHIFT 1
+#define DBG_DUMP_REGS_CHIP_DESC_SIM_CHIP_MASK_MASK 0xFF
+#define DBG_DUMP_REGS_CHIP_DESC_SIM_CHIP_MASK_SHIFT 24
+};
+
+/*
+ * registers dump descriptor: raw
+ */
+struct dbg_dump_regs_raw_desc {
+ __le32 data;
+#define DBG_DUMP_REGS_RAW_DESC_IS_CHIP_MASK_MASK 0x1
+#define DBG_DUMP_REGS_RAW_DESC_IS_CHIP_MASK_SHIFT 0
+#define DBG_DUMP_REGS_RAW_DESC_PARAM1_MASK 0x7FFFFF
+#define DBG_DUMP_REGS_RAW_DESC_PARAM1_SHIFT 1
+#define DBG_DUMP_REGS_RAW_DESC_PARAM2_MASK 0xFF
+#define DBG_DUMP_REGS_RAW_DESC_PARAM2_SHIFT 24
+};
+
+/*
+ * registers dump descriptor: sequence
+ */
+struct dbg_dump_regs_seq_desc {
+ __le32 data;
+#define DBG_DUMP_REGS_SEQ_DESC_IS_CHIP_MASK_MASK 0x1
+#define DBG_DUMP_REGS_SEQ_DESC_IS_CHIP_MASK_SHIFT 0
+#define DBG_DUMP_REGS_SEQ_DESC_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REGS_SEQ_DESC_ADDRESS_SHIFT 1
+#define DBG_DUMP_REGS_SEQ_DESC_LENGTH_MASK 0xFF
+#define DBG_DUMP_REGS_SEQ_DESC_LENGTH_SHIFT 24
+};
+
+/*
+ * registers dump descriptor
+ */
+union dbg_dump_regs_desc {
+ struct dbg_dump_regs_raw_desc raw /* dumped registers raw descriptor */;
+ struct dbg_dump_regs_seq_desc seq /* dumped registers seq descriptor */;
+ struct dbg_dump_regs_chip_desc chip /* dumped registers chip descriptor */;
+};
+
+/*
+ * idle check macro types
+ */
+enum idle_chk_macro_types {
+ IDLE_CHK_MACRO_TYPE_COMPARE /* parametric register comparison */,
+ IDLE_CHK_MACRO_TYPE_QM_RD_WR /* compare QM r/w pointers and banks */,
+ MAX_IDLE_CHK_MACRO_TYPES
+};
+
+/*
+ * Idle Check result header
+ */
+struct idle_chk_result_hdr {
+ __le16 rule_idx /* Idle check rule index in CSV file */;
+ __le16 loop_idx /* the loop index in which the failure occurred */;
+ __le16 num_fw_values;
+ __le16 data;
+#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_MASK 0xF
+#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_SHIFT 0
+#define IDLE_CHK_RESULT_HDR_LOOP_VALID_MASK 0x1
+#define IDLE_CHK_RESULT_HDR_LOOP_VALID_SHIFT 4
+#define IDLE_CHK_RESULT_HDR_SEVERITY_MASK 0x7
+#define IDLE_CHK_RESULT_HDR_SEVERITY_SHIFT 5
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_MASK 0xF
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_SHIFT 8
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_MASK 0xF
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_SHIFT 12
+};
+
+/*
+ * Idle Check rule
+ */
+struct idle_chk_rule {
+ __le32 data;
+#define IDLE_CHK_RULE_ASIC_CHIP_MASK_MASK 0xF
+#define IDLE_CHK_RULE_ASIC_CHIP_MASK_SHIFT 0
+#define IDLE_CHK_RULE_SIM_CHIP_MASK_MASK 0xF
+#define IDLE_CHK_RULE_SIM_CHIP_MASK_SHIFT 4
+#define IDLE_CHK_RULE_BLOCK_ID_MASK 0xFF
+#define IDLE_CHK_RULE_BLOCK_ID_SHIFT 8
+#define IDLE_CHK_RULE_MACRO_TYPE_MASK 0xF
+#define IDLE_CHK_RULE_MACRO_TYPE_SHIFT 16
+#define IDLE_CHK_RULE_SEVERITY_MASK 0x7
+#define IDLE_CHK_RULE_SEVERITY_SHIFT 20
+#define IDLE_CHK_RULE_RESERVED_MASK 0x1
+#define IDLE_CHK_RULE_RESERVED_SHIFT 23
+#define IDLE_CHK_RULE_PRED_ID_MASK 0xFF
+#define IDLE_CHK_RULE_PRED_ID_SHIFT 24
+ __le16 loop;
+ __le16 increment /* address increment of first argument register on each iteration */;
+ __le32 reg_addr[3];
+ __le32 pred_imm[3] /* immediate values passed as arguments to the idle check rule */;
+};
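
Per the field comments, a rule that loops re-reads its first argument register with a fixed stride. Illustratively (LE host assumed, function name hypothetical):

    #include <stdint.h>

    /* GRC dword address sampled by argument register 0 on iteration i,
     * for i in [0, loop).
     */
    static inline uint32_t idle_chk_arg0_addr(const struct idle_chk_rule *r,
                                              uint16_t i)
    {
            return r->reg_addr[0] + (uint32_t)i * r->increment;
    }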
+
+/*
+ * idle check severity types
+ */
+enum idle_chk_severity_types {
+ IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ IDLE_CHK_SEVERITY_WARNING /* idle check failure should cause a warning */,
+ MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+/*
+ * init array header: raw
+ */
+struct init_array_raw_hdr {
+ __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+/*
+ * init array header: standard
+ */
+struct init_array_standard_hdr {
+ __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: zipped
+ */
+struct init_array_zipped_hdr {
+ __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: pattern
+ */
+struct init_array_pattern_hdr {
+ __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+/*
+ * init array header union
+ */
+union init_array_hdr {
+ struct init_array_raw_hdr raw /* raw init array header */;
+ struct init_array_standard_hdr standard /* standard init array header */;
+ struct init_array_zipped_hdr zipped /* zipped init array header */;
+ struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+/*
+ * init array types
+ */
+enum init_array_types {
+ INIT_ARR_STANDARD /* standard init array */,
+ INIT_ARR_ZIPPED /* zipped init array */,
+ INIT_ARR_PATTERN /* a repeated pattern */,
+ MAX_INIT_ARRAY_TYPES
+};
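
Because every header variant keeps the 4-bit TYPE field at bit 0, a consumer can classify a header through the raw view and only then pick the matching member. A sketch, assuming the TYPE nibble carries an enum init_array_types value (the function itself is illustrative; LE host assumed):

    #include <stdint.h>

    /* Decode the size-like parameter of an init array header. */
    static uint32_t init_array_param(const union init_array_hdr *hdr)
    {
            switch (hdr->raw.data & INIT_ARRAY_RAW_HDR_TYPE_MASK) {
            case INIT_ARR_STANDARD: /* size in dwords */
                    return (hdr->standard.data >>
                            INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT) &
                           INIT_ARRAY_STANDARD_HDR_SIZE_MASK;
            case INIT_ARR_ZIPPED:   /* zipped size in dwords */
                    return (hdr->zipped.data >>
                            INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT) &
                           INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK;
            case INIT_ARR_PATTERN:  /* number of repetitions */
                    return (hdr->pattern.data >>
                            INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT) &
                           INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK;
            default:
                    return 0;
            }
    }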
+
+/*
+ * init operation: callback
+ */
+struct init_callback_op {
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id /* Callback ID */;
+ __le16 block_id /* Blocks ID */;
+};
+
+/*
+ * init operation: delay
+ */
+struct init_delay_op {
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay /* delay in us */;
+};
+
+/*
+ * init operation: if_mode
+ */
+struct init_if_mode_op {
+ __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
+};
+
+/*
+ * init operation: if_phase
+ */
+struct init_if_phase_op {
+ __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+/*
+ * init mode operators
+ */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT /* init mode not operator */,
+ INIT_MODE_OP_OR /* init mode or operator */,
+ INIT_MODE_OP_AND /* init mode and operator */,
+ MAX_INIT_MODE_OPS
+};
+
+/*
+ * init operation: raw
+ */
+struct init_raw_op {
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2 /* Init param 2 */;
+};
+
+/*
+ * init array params
+ */
+struct init_op_array_params {
+ __le16 size /* array size in dwords */;
+ __le16 offset /* array start offset in dwords */;
+};
+
+/*
+ * Write init operation arguments
+ */
+union init_write_args {
+ __le32 inline_val /* value to write, used when init source is INIT_SRC_INLINE */;
+ __le32 zeros_count /* number of zeros to write, used when init source is INIT_SRC_ZEROS */;
+ __le32 array_offset /* array offset to write, used when init source is INIT_SRC_ARRAY */;
+ struct init_op_array_params runtime /* used when init source is INIT_SRC_RUNTIME */;
+};
+
+/*
+ * init operation: write
+ */
+struct init_write_op {
+ __le32 data;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args /* Write init operation arguments */;
+};
+
+/*
+ * init operation: read
+ */
+struct init_read_op {
+ __le32 op_data;
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+ __le32 expected_val /* expected polling value, used only when polling is done */;
+};
+
+/*
+ * Init operations union
+ */
+union init_op {
+ struct init_raw_op raw /* raw init operation */;
+ struct init_write_op write /* write init operation */;
+ struct init_read_op read /* read init operation */;
+ struct init_if_mode_op if_mode /* if_mode init operation */;
+ struct init_if_phase_op if_phase /* if_phase init operation */;
+ struct init_callback_op callback /* callback init operation */;
+ struct init_delay_op delay /* delay init operation */;
+};
+
+/*
+ * Init command operation types
+ */
+enum init_op_types {
+ INIT_OP_READ /* GRC read init command */,
+ INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_IF_MODE /* Skip init commands if the init modes expression doesn't match */,
+ INIT_OP_IF_PHASE /* Skip init commands if the init phase doesn't match */,
+ INIT_OP_DELAY /* delay init command */,
+ INIT_OP_CALLBACK /* callback init command */,
+ MAX_INIT_OP_TYPES
+};
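
Together with union init_op, this enum suggests the shape of the init-command interpreter: read the 4-bit OP field through the raw view, then act on the matching member. A hedged sketch, assuming the OP nibble carries an enum init_op_types value; the handlers are placeholders, not ecore functions:

    #include <stdint.h>

    static void handle_write(const struct init_write_op *w) { (void)w; }
    static void handle_read(const struct init_read_op *r)   { (void)r; }
    static void handle_delay(uint32_t us)                   { (void)us; }

    /* Dispatch one init command (LE host assumed). */
    static void run_init_op(const union init_op *op)
    {
            switch (op->raw.op_data & INIT_RAW_OP_OP_MASK) {
            case INIT_OP_WRITE:
                    handle_write(&op->write); /* see INIT_WRITE_OP_SOURCE */
                    break;
            case INIT_OP_READ:
                    handle_read(&op->read);
                    break;
            case INIT_OP_DELAY:
                    handle_delay(op->delay.delay); /* delay in us */
                    break;
            default:
                    break;
            }
    }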
+
+/*
+ * init polling types
+ */
+enum init_poll_types {
+ INIT_POLL_NONE /* No polling */,
+ INIT_POLL_EQ /* polling type: equal */,
+ INIT_POLL_OR /* polling type: OR */,
+ INIT_POLL_AND /* polling type: AND */,
+ MAX_INIT_POLL_TYPES
+};
+
+/*
+ * init source types
+ */
+enum init_source_types {
+ INIT_SRC_INLINE /* init value is included in the init command */,
+ INIT_SRC_ZEROS /* init value is all zeros */,
+ INIT_SRC_ARRAY /* init value is an array of values */,
+ INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ MAX_INIT_SOURCE_TYPES
+};
+
+/*
+ * Internal RAM Offsets macro data
+ */
+struct iro {
+ __le32 base /* RAM field offset */;
+ __le16 m1 /* multiplier 1 */;
+ __le16 m2 /* multiplier 2 */;
+ __le16 m3 /* multiplier 3 */;
+ __le16 size /* RAM field size */;
+};
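
The three multipliers make each IRO entry a small linear addressing formula. A sketch under the conventional interpretation offset = base + id1*m1 + id2*m2 + id3*m3 (the function and parameter names are illustrative; LE host assumed):

    #include <stdint.h>

    /* Resolve an internal RAM byte offset from an IRO entry. */
    static inline uint32_t iro_offset(const struct iro *e, uint16_t id1,
                                      uint16_t id2, uint16_t id3)
    {
            return e->base + (uint32_t)id1 * e->m1 +
                   (uint32_t)id2 * e->m2 + (uint32_t)id3 * e->m3;
    }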
+
+/*
+ * register descriptor
+ */
+struct reg_desc {
+ __le32 data;
+#define REG_DESC_ADDRESS_MASK 0xFFFFFF
+#define REG_DESC_ADDRESS_SHIFT 0
+#define REG_DESC_SIZE_MASK 0xFF
+#define REG_DESC_SIZE_SHIFT 24
+};
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block_data {
+ u8 enabled /* Indicates if the block is enabled for recording (0/1) */;
+ u8 hw_id /* HW ID associated with the block */;
+ u8 line_num /* Debug line number to select */;
+ u8 right_shift /* Number of units to right-shift the debug data (0-3) */;
+ u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */;
+ u8 force_valid /* 4-bit value: bit i set -> unit i is forced valid. */;
+ u8 force_frame /* 4-bit value: bit i set -> unit i frame bit is forced. */;
+ u8 reserved;
+};
+
+/*
+ * Debug Bus Clients
+ */
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/*
+ * Debug Bus constraint operation types
+ */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ /* equal */,
+ DBG_BUS_CONSTRAINT_OP_NE /* not equal */,
+ DBG_BUS_CONSTRAINT_OP_LT /* less than */,
+ DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */,
+ DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_GT /* greater than */,
+ DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */,
+ DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/*
+ * Debug Bus memory address
+ */
+struct dbg_bus_mem_addr {
+ __le32 lo;
+ __le32 hi;
+};
+
+/*
+ * Debug Bus PCI buffer data
+ */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
+ struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
+ __le32 size /* PCI buffer size in bytes */;
+};
+
+/*
+ * Debug Bus Storm EID range filter params
+ */
+struct dbg_bus_storm_eid_range_params {
+ u8 min /* Minimal event ID to filter on */;
+ u8 max /* Maximal event ID to filter on */;
+};
+
+/*
+ * Debug Bus Storm EID mask filter params
+ */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val /* Event ID value */;
+ u8 mask /* Event ID mask. 1s in the mask = don't care bits. */;
+};
+
+/*
+ * Debug Bus Storm EID filter params
+ */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range /* EID range filter params */;
+ struct dbg_bus_storm_eid_mask_params mask /* EID mask filter params */;
+};
+
+/*
+ * Debug Bus Storm data
+ */
+struct dbg_bus_storm_data {
+ u8 fast_enabled;
+ u8 fast_mode /* Fast debug Storm mode, valid only if fast_enabled is set */;
+ u8 slow_enabled;
+ u8 slow_mode /* Slow debug Storm mode, valid only if slow_enabled is set */;
+ u8 hw_id /* HW ID associated with the Storm */;
+ u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ __le16 reserved;
+ __le32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
+};
+
+/*
+ * Debug Bus data
+ */
+struct dbg_bus_data {
+ __le32 app_version /* The tools version number of the application */;
+ u8 state /* The current debug bus state */;
+ u8 hw_dwords /* HW dwords per cycle */;
+ u8 next_hw_id /* Next HW ID to be associated with an input */;
+ u8 num_enabled_blocks /* Number of blocks enabled for recording */;
+ u8 num_enabled_storms /* Number of Storms enabled for recording */;
+ u8 target /* Output target */;
+ u8 next_trigger_state /* ID of next trigger state to be added */;
+ u8 next_constraint_id /* ID of next filter/trigger constraint to be added */;
+ u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
+ u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
+ u8 timestamp_input_en /* Indicates if timestamp recording is enabled (0/1) */;
+ u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
+ u8 trigger_en /* Indicates if the recording trigger is enabled (0/1) */;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
+ u8 unify_inputs;
+ u8 rcv_from_other_engine;
+ struct dbg_bus_pci_buf_data pci_buf;
+ __le16 reserved;
+ struct dbg_bus_block_data blocks[80] /* Debug Bus data for each block */;
+ struct dbg_bus_storm_data storms[6] /* Debug Bus data for each Storm */;
+};
+
+/*
+ * Debug bus filter types
+ */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF /* filter always off */,
+ DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */,
+ DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */,
+ DBG_BUS_FILTER_TYPE_ON /* filter always on */,
+ MAX_DBG_BUS_FILTER_TYPES
+};
+
+/*
+ * Debug bus frame modes
+ */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */,
+ DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */,
+ DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */,
+ MAX_DBG_BUS_FRAME_MODES
+};
+
+/*
+ * Debug bus input types
+ */
+enum dbg_bus_input_types {
+ DBG_BUS_INPUT_TYPE_STORM,
+ DBG_BUS_INPUT_TYPE_BLOCK,
+ MAX_DBG_BUS_INPUT_TYPES
+};
+
+/*
+ * Debug bus other engine mode
+ */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+ MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+/*
+ * Debug bus post-trigger recording types
+ */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */,
+ DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */,
+ MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+/*
+ * Debug bus pre-trigger recording types
+ */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */,
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS /* start recording some chunks before trigger */,
+ DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */,
+ MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+/*
+ * Debug bus SEMI frame modes
+ */
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0 /* 0 slow dwords, 4 fast dwords */,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3 /* 4 slow dwords, 0 fast dwords */,
+ MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
+/*
+ * Debug bus states
+ */
+enum dbg_bus_states {
+ DBG_BUS_STATE_BEFORE_RECORD /* before the debug bus recording starts */,
+ DBG_BUS_STATE_DURING_RECORD /* during debug bus recording */,
+ DBG_BUS_STATE_AFTER_RECORD /* after debug bus recording */,
+ MAX_DBG_BUS_STATES
+};
+
+/*
+ * Debug Bus Storm modes
+ */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */,
+ DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */,
+ DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */,
+ DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */,
+ DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+/*
+ * Debug bus target IDs
+ */
+enum dbg_bus_targets {
+ DBG_BUS_TARGET_ID_INT_BUF /* records debug bus to DBG block internal buffer */,
+ DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */,
+ DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */,
+ MAX_DBG_BUS_TARGETS
+};
+
+/*
+ * GRC Dump data
+ */
+struct dbg_grc_data {
+ u8 is_updated /* Indicates if the GRC Dump data is updated (0/1) */;
+ u8 chip_id /* Chip ID */;
+ u8 chip_mask /* Chip mask */;
+ u8 reserved;
+ __le32 max_dump_dwords /* Max GRC Dump size in dwords */;
+ __le32 param_val[40];
+ u8 param_set_by_user[40];
+};
+
+/*
+ * Debug GRC params
+ */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */,
+ DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */,
+ DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */,
+ DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */,
+ DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */,
+ DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
+ DBG_GRC_PARAM_RESERVED /* reserved */,
+ DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_DMAE /* dump DMAE memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */,
+ DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */,
+ DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */,
+ DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */,
+ DBG_GRC_PARAM_EXCLUDE_ALL /* preset: exclude all memories from dump (1 only) */,
+ DBG_GRC_PARAM_CRASH /* preset: include memories for crash dump (1 only) */,
+ DBG_GRC_PARAM_PARITY_SAFE /* perform dump only if MFW is responding (0/1) */,
+ DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
+ MAX_DBG_GRC_PARAMS
+};
+
+/*
+ * Debug reset registers
+ */
+enum dbg_reset_regs {
+ DBG_RESET_REG_MISCS_PL_UA,
+ DBG_RESET_REG_MISCS_PL_HV,
+ DBG_RESET_REG_MISC_PL_UA,
+ DBG_RESET_REG_MISC_PL_HV,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ DBG_RESET_REG_MISC_PL_PDA_VAUX,
+ MAX_DBG_RESET_REGS
+};
+
+/*
+ * @DPDK Debug status codes
+ */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_TOO_MANY_INPUTS,
+ DBG_STATUS_INPUT_OVERLAP,
+ DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_NO_DATA_TRIGGERED,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ MAX_DBG_STATUS
+};
+
+/*
+ * Debug Storms IDs
+ */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/*
+ * Idle Check data
+ */
+struct idle_chk_data {
+ __le32 buf_size /* Idle check buffer size in dwords */;
+ u8 buf_size_set /* Indicates if the idle check buffer size was set (0/1) */;
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/*
+ * MCP Trace data
+ */
+struct mcp_trace_data {
+ __le32 buf_size /* MCP Trace buffer size in dwords */;
+ u8 buf_size_set /* Indicates if the MCP Trace buffer size was set (0/1) */;
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/*
+ * Debug Tools data (per HW function)
+ */
+struct dbg_tools_data {
+ struct dbg_grc_data grc /* GRC Dump data */;
+ struct dbg_bus_data bus /* Debug Bus data */;
+ struct idle_chk_data idle_chk /* Idle Check data */;
+ struct mcp_trace_data mcp_trace /* MCP Trace data */;
+ u8 block_in_reset[80] /* Indicates if a block is in reset state (0/1) */;
+ u8 chip_id /* Chip ID (from enum chip_ids) */;
+ u8 chip_mask /* Chip mask: the bit at index chip_id is set, the rest are cleared */;
+ u8 initialized /* Indicates if the data was initialized */;
+ u8 reset_state_updated /* Indicates if blocks reset state is updated (0/1) */;
+};
+
+/*
+ * BRB RAM init requirements
+ */
+struct init_brb_ram_req {
+ __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
+ __le32 headroom_per_tc /* headroom size per TC, in bytes */;
+ __le32 min_pkt_size /* min packet size, in bytes */;
+ __le32 max_ports_per_engine /* max number of ports per engine */;
+ u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
+};
+
+/*
+ * ETS per-TC init requirements
+ */
+struct init_ets_tc_req {
+ u8 use_sp;
+ u8 use_wfq;
+ __le16 weight /* An arbitration weight. Valid only if use_wfq is set. */;
+};
+
+/*
+ * ETS init requirements
+ */
+struct init_ets_req {
+ __le32 mtu /* Max packet size (in bytes) */;
+ struct init_ets_tc_req tc_req[NUM_OF_TCS] /* ETS initialization requirements per TC. */;
+};
+
+/*
+ * NIG LB RL init requirements
+ */
+struct init_nig_lb_rl_req {
+ __le16 lb_mac_rate;
+ __le16 lb_rate;
+ __le32 mtu /* Max packet size (in bytes) */;
+ __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+/*
+ * NIG TC mapping for each priority
+ */
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id /* the mapped TC ID */;
+ u8 valid /* indicates if the mapping entry is valid */;
+};
+
+/*
+ * NIG priority to TC map init requirements
+ */
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
+/*
+ * QM per-port init parameters
+ */
+struct init_qm_port_params {
+ u8 active /* Indicates if this port is active */;
+ u8 num_active_phys_tcs /* number of physical TCs used by this port */;
+ __le16 num_pbf_cmd_lines /* number of PBF command lines that can be used by this port */;
+ __le16 num_btb_blocks /* number of BTB blocks that can be used by this port */;
+ __le16 reserved;
+};
+
+/*
+ * QM per-PQ init parameters
+ */
+struct init_qm_pq_params {
+ u8 vport_id /* VPORT ID */;
+ u8 tc_id /* TC ID */;
+ u8 wrr_group /* WRR group */;
+ u8 reserved;
+};
+
+/*
+ * QM per-vport init parameters
+ */
+struct init_qm_vport_params {
+ __le32 vport_rl;
+ __le16 vport_wfq;
+ __le16 first_tx_pq_id[NUM_OF_TCS] /* the first Tx PQ ID associated with this VPORT for each TC. */;
+};
+
+#endif /* __ECORE_HSI_TOOLS__ */
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
new file mode 100644
index 00000000..5403b94b
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -0,0 +1,910 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+#include "ecore_utils.h"
+#include "ecore_iov_api.h"
+
+#ifndef ASIC_ONLY
+#define ECORE_EMUL_FACTOR 2000
+#define ECORE_FPGA_FACTOR 200
+#endif
+
+#define ECORE_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define ECORE_BAR_INVALID_OFFSET -1
+
+struct ecore_ptt {
+ osal_list_entry_t list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+};
+
+struct ecore_ptt_pool {
+ osal_list_t free_list;
+ osal_spinlock_t lock;
+ struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt_pool *p_pool;
+ int i;
+
+ p_pool = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_ptt_pool));
+ if (!p_pool)
+ return ECORE_NOMEM;
+
+ OSAL_LIST_INIT(&p_pool->free_list);
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_pool->ptts[i].idx = i;
+ p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
+ p_pool->ptts[i].pxp.pretend.control = 0;
+
+ /* There are special PTT entries that are taken only by design.
+ * The rest are added to the list for general usage.
+ */
+ if (i >= RESERVED_PTT_MAX)
+ OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
+ &p_pool->free_list);
+ }
+
+ p_hwfn->p_ptt_pool = p_pool;
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+ OSAL_SPIN_LOCK_INIT(&p_pool->lock);
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt;
+ int i;
+
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+ p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
+ }
+}
+
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->p_ptt_pool)
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = OSAL_NULL;
+}
+
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt;
+ unsigned int i;
+
+ /* Take the free PTT from the list */
+ for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
+ OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list))
+ break;
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+ OSAL_MSLEEP(1);
+ }
+
+ /* We should not time-out, but it can happen... --> Lock isn't held */
+ if (i == ECORE_BAR_ACQUIRE_TIMEOUT) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate PTT\n");
+ return OSAL_NULL;
+ }
+
+ p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
+ struct ecore_ptt, list_entry);
+ OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
+ &p_hwfn->p_ptt_pool->free_list);
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx);
+
+ return p_ptt;
+}
+
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ /* This PTT should not be set to pretend if it is being released */
+
+ OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+ OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+}
+
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ /* The HW is using DWORDS and we need to translate it to Bytes */
+ return p_ptt->pxp.offset << 2;
+}
+
+static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
+{
+ return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
+{
+ return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 new_hw_addr)
+{
+ u32 prev_hw_addr;
+
+ prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+ if (new_hw_addr == prev_hw_addr)
+ return;
+
+ /* Update PTT entry in admin window */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Updating PTT entry %d to offset 0x%x\n",
+ p_ptt->idx, new_hw_addr);
+
+ /* The HW is using DWORDS and the address is in Bytes */
+ p_ptt->pxp.offset = new_hw_addr >> 2;
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, offset), p_ptt->pxp.offset);
+}
+
+static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+ u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 offset;
+
+ offset = hw_addr - win_hw_addr;
+
+ /* Verify the address is within the window */
+ if (hw_addr < win_hw_addr ||
+ offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+ ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+ offset = 0;
+ }
+
+ return ecore_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx)
+{
+ if (ptt_idx >= RESERVED_PTT_MAX) {
+ DP_NOTICE(p_hwfn, true,
+ "Requested PTT %d is out of range\n", ptt_idx);
+ return OSAL_NULL;
+ }
+
+ return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
+{
+ u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+ REG_WR(p_hwfn, bar_addr, val);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+}
+
+u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+ u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+ u32 val = REG_RD(p_hwfn, bar_addr);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+
+ return val;
+}
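
Taken together, ecore_ptt_acquire(), ecore_rd()/ecore_wr() and ecore_ptt_release() form the canonical GRC access pattern: take a window, access through it, give it back. A minimal caller sketch; REG_EXAMPLE_ADDR is a hypothetical register offset, not one of the driver's reg_addr.h definitions:

    #define REG_EXAMPLE_ADDR 0x2000 /* hypothetical GRC byte offset */

    static enum _ecore_status_t example_rmw(struct ecore_hwfn *p_hwfn)
    {
            struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
            u32 val;

            if (!p_ptt)
                    return ECORE_TIMEOUT; /* pool exhausted within timeout */

            val = ecore_rd(p_hwfn, p_ptt, REG_EXAMPLE_ADDR);
            ecore_wr(p_hwfn, p_ptt, REG_EXAMPLE_ADDR, val | 0x1);

            ecore_ptt_release(p_hwfn, p_ptt);
            return ECORE_SUCCESS;
    }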
+
+static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *addr,
+ u32 hw_addr, osal_size_t n, bool to_device)
+{
+ u32 dw_count, *host_addr, hw_offset;
+ osal_size_t quota, done = 0;
+ u32 OSAL_IOMEM *reg_addr;
+
+ while (done < n) {
+ quota = OSAL_MIN_T(osal_size_t, n - done,
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = ecore_ptt_get_bar_addr(p_ptt);
+ } else {
+ hw_offset = hw_addr + done;
+ }
+
+ dw_count = quota / 4;
+ host_addr = (u32 *)((u8 *)addr + done);
+ reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);
+
+ if (to_device)
+ while (dw_count--)
+ DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
+ else
+ while (dw_count--)
+ *host_addr++ = DIRECT_REG_RD(p_hwfn,
+ reg_addr++);
+
+ done += quota;
+ }
+}
+
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *dest, u32 hw_addr, osal_size_t n)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+ hw_addr, dest, hw_addr, (unsigned long)n);
+
+ ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr, void *src, osal_size_t n)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+ hw_addr, hw_addr, src, (unsigned long)n);
+
+ ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 fid)
+{
+ void *p_pretend;
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+ /* Every pretend undoes previous pretends, including a previous port pretend */
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
+
+ p_pretend = &p_ptt->pxp.pretend;
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+}
+
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port_id)
+{
+ void *p_pretend;
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+ p_ptt->pxp.pretend.control = control;
+
+ p_pretend = &p_ptt->pxp.pretend;
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+}
+
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ void *p_pretend;
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+ p_ptt->pxp.pretend.control = control;
+
+ p_pretend = &p_ptt->pxp.pretend;
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+}
+
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+ u32 concrete_fid = 0;
+
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+ return concrete_fid;
+}
+
+/* Not in use @DPDK
+ * Ecore HW lock
+ * =============
+ * Although the implementation is ready, today we don't have any flow that
+ * utilizes said locks - and we want to keep it this way.
+ * If this changes, this needs to be revisited.
+ */
+
+/* Ecore DMAE
+ * =============
+ */
+static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
+ struct ecore_dmae_params *p_params)
+{
+ u16 opcode_b = 0;
+ u32 opcode = 0;
+
+ /* Whether the source is the PCIe or the GRC.
+ * 0- The source is the PCIe
+ * 1- The source is the GRC.
+ */
+ opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+ : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
+ opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT;
+
+ /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+ opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+ : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
+ opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT;
+
+ /* DMAE_E4_TODO need to check which value to specify here. */
+ /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
+
+ /* Whether to write a completion word to the completion destination:
+ * 0-Do not write a completion word
+ * 1-Write the completion word
+ */
+ opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
+ opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+
+ if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
+ opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
+
+ /* swapping mode 3 - big endian. There should be a define ifdef'ed
+ * for this in the HSI somewhere; since there currently is not, the
+ * mode is set here.
+ */
+ opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
+
+ opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
+
+ /* reset source address in next go */
+ opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+
+ /* reset dest address in next go */
+ opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
+
+ /* SRC/DST VFID: all 1's - pf, otherwise VF id */
+ if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
+ opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
+ opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
+ } else {
+ opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT);
+ }
+ if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
+ opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ }
+
+ p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
+}
+
+static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
+{
+ OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
+
+ return DMAE_REG_GO_C0 + idx * 4;
+}
+
+static enum _ecore_status_t
+ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+ u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+
+ /* verify address is not OSAL_NULL */
+ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+ ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
+ DP_NOTICE(p_hwfn, true,
+ "source or destination address 0 idx_cmd=%d\n"
+ "opcode = [0x%08x,0x%04x] len=0x%x"
+ " src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd, (u32)p_command->opcode,
+ (u16)p_command->opcode_b,
+ (int)p_command->length,
+ (int)p_command->src_addr_hi,
+ (int)p_command->src_addr_lo,
+ (int)p_command->dst_addr_hi,
+ (int)p_command->dst_addr_lo);
+
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
+ "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd, (u32)p_command->opcode,
+ (u16)p_command->opcode_b,
+ (int)p_command->length,
+ (int)p_command->src_addr_hi,
+ (int)p_command->src_addr_lo,
+ (int)p_command->dst_addr_hi, (int)p_command->dst_addr_lo);
+
+ /* Copy the command to DMAE - need to do it before every call
+ * since the source/dest addresses are reset between commands.
+ * The number of commands has been increased to 16 (previously 14).
+ * The first 9 DWs are the command registers, the 10th DW is the
+ * GO register, and the rest are result registers
+ * (which are read-only by the client).
+ */
+ for (i = 0; i < DMAE_CMD_SIZE; i++) {
+ u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+ *(((u32 *)p_command) + i) : 0;
+
+ ecore_wr(p_hwfn, p_ptt,
+ DMAE_REG_CMD_MEM +
+ (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+ (i * sizeof(u32)), data);
+ }
+
+ ecore_wr(p_hwfn, p_ptt,
+ ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
+
+ return ecore_status;
+}
+
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+ dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+ struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+ u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+ u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+ *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
+ if (*p_comp == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `p_completion_word'\n");
+ ecore_dmae_info_free(p_hwfn);
+ return ECORE_NOMEM;
+ }
+
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
+ sizeof(struct dmae_cmd));
+ if (*p_cmd == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct dmae_cmd'\n");
+ ecore_dmae_info_free(p_hwfn);
+ return ECORE_NOMEM;
+ }
+
+ p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
+ sizeof(u32) * DMAE_MAX_RW_SIZE);
+ if (*p_buff == OSAL_NULL) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `intermediate_buffer'\n");
+ ecore_dmae_info_free(p_hwfn);
+ return ECORE_NOMEM;
+ }
+
+ /* DMAE_E4_TODO : Need to change this to reflect proper channel */
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
+{
+ dma_addr_t p_phys;
+
+ /* Just make sure no one is in the middle */
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+ if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_completion_word,
+ p_phys, sizeof(u32));
+ p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_dmae_cmd,
+ p_phys, sizeof(struct dmae_cmd));
+ p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
+ p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_hwfn->dmae_info.p_intermediate_buffer,
+ p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
+ p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
+ }
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+}
+
+static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+ u32 wait_cnt_limit = 10000, wait_cnt = 0;
+
+#ifndef ASIC_ONLY
+ u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
+ ECORE_EMUL_FACTOR :
+ (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
+ ECORE_FPGA_FACTOR : 1));
+
+ wait_cnt_limit *= factor;
+#endif
+
+ /* DMAE_E4_TODO : TODO check if we have to call any other function
+ * other than BARRIER to sync the completion_word since we are not
+ * using the volatile keyword for this
+ */
+ OSAL_BARRIER(p_hwfn->p_dev);
+ while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+ /* DMAE_E4_TODO : using OSAL_MSLEEP instead of mm_wait since mm
+ * functions are getting deprecated. Need to review in the future.
+ */
+ OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
+ if (++wait_cnt > wait_cnt_limit) {
+ DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
+ "Timed-out waiting for operation to"
+ " complete. Completion word is 0x%08x"
+ " expected 0x%08x.\n",
+ *p_hwfn->dmae_info.p_completion_word,
+ DMAE_COMPLETION_VAL);
+ ecore_status = ECORE_TIMEOUT;
+ break;
+ }
+ /* to sync the completion_word since we are not
+ * using the volatile keyword for p_completion_word
+ */
+ OSAL_BARRIER(p_hwfn->p_dev);
+ }
+
+ if (ecore_status == ECORE_SUCCESS)
+ *p_hwfn->dmae_info.p_completion_word = 0;
+
+ return ecore_status;
+}
+
+static enum _ecore_status_t
+ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type, u8 dst_type, u32 length)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+ switch (src_type) {
+ case ECORE_DMAE_ADDRESS_GRC:
+ case ECORE_DMAE_ADDRESS_HOST_PHYS:
+ cmd->src_addr_hi = DMA_HI(src_addr);
+ cmd->src_addr_lo = DMA_LO(src_addr);
+ break;
+ /* for virt source addresses we use the intermediate buffer. */
+ case ECORE_DMAE_ADDRESS_HOST_VIRT:
+ cmd->src_addr_hi = DMA_HI(phys);
+ cmd->src_addr_lo = DMA_LO(phys);
+ OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+ (void *)(osal_uintptr_t)src_addr,
+ length * sizeof(u32));
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ switch (dst_type) {
+ case ECORE_DMAE_ADDRESS_GRC:
+ case ECORE_DMAE_ADDRESS_HOST_PHYS:
+ cmd->dst_addr_hi = DMA_HI(dst_addr);
+ cmd->dst_addr_lo = DMA_LO(dst_addr);
+ break;
+ /* for virt destination address we use the intermediate buff. */
+ case ECORE_DMAE_ADDRESS_HOST_VIRT:
+ cmd->dst_addr_hi = DMA_HI(phys);
+ cmd->dst_addr_lo = DMA_LO(phys);
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ cmd->length = (u16)length;
+
+ if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+ src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+ OSAL_DMA_SYNC(p_hwfn->p_dev,
+ (void *)HILO_U64(cmd->src_addr_hi,
+ cmd->src_addr_lo),
+ length * sizeof(u32), false);
+
+ ecore_dmae_post_command(p_hwfn, p_ptt);
+
+ ecore_status = ecore_dmae_operation_wait(p_hwfn);
+
+ /* TODO - is it true ? */
+ if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+ src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+ OSAL_DMA_SYNC(p_hwfn->p_dev,
+ (void *)HILO_U64(cmd->src_addr_hi,
+ cmd->src_addr_lo),
+ length * sizeof(u32), true);
+
+ if (ecore_status != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, ECORE_MSG_HW,
+ "ecore_dmae_host2grc: Wait Failed. source_addr"
+ " 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x\n",
+ (unsigned long)src_addr, (unsigned long)dst_addr,
+ length);
+ return ecore_status;
+ }
+
+ if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
+ OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
+ &p_hwfn->dmae_info.p_intermediate_buffer[0],
+ length * sizeof(u32));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+ u64 src_addr_split = 0, dst_addr_split = 0;
+ u16 length_limit = DMAE_MAX_RW_SIZE;
+ u32 offset = 0;
+
+ ecore_dmae_opcode(p_hwfn,
+ (src_type == ECORE_DMAE_ADDRESS_GRC),
+ (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
+
+ cmd->comp_addr_lo = DMA_LO(phys);
+ cmd->comp_addr_hi = DMA_HI(phys);
+ cmd->comp_val = DMAE_COMPLETION_VAL;
+
+ /* compute how many splits are needed, DMAE_MAX_RW_SIZE dwords each */
+ cnt_split = size_in_dwords / length_limit;
+ length_mod = size_in_dwords % length_limit;
+
+ src_addr_split = src_addr;
+ dst_addr_split = dst_addr;
+
+ for (i = 0; i <= cnt_split; i++) {
+ offset = length_limit * i;
+
+ if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
+ if (src_type == ECORE_DMAE_ADDRESS_GRC)
+ src_addr_split = src_addr + offset;
+ else
+ src_addr_split = src_addr + (offset * 4);
+ }
+
+ if (dst_type == ECORE_DMAE_ADDRESS_GRC)
+ dst_addr_split = dst_addr + offset;
+ else
+ dst_addr_split = dst_addr + (offset * 4);
+
+ length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+ /* might be zero on last iteration */
+ if (!length_cur)
+ continue;
+
+ ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
+ p_ptt,
+ src_addr_split,
+ dst_addr_split,
+ src_type,
+ dst_type,
+ length_cur);
+ if (ecore_status != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "ecore_dmae_execute_sub_operation Failed"
+ " with error 0x%x. source_addr 0x%lx,"
+ " dest addr 0x%lx, size_in_dwords 0x%x\n",
+ ecore_status, (unsigned long)src_addr,
+ (unsigned long)dst_addr, length_cur);
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
+ break;
+ }
+ }
+
+ return ecore_status;
+}
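+
+/* Worked example of the split arithmetic above (the limit value is an
+ * assumption for illustration): if DMAE_MAX_RW_SIZE were 0x2000 dwords and
+ * size_in_dwords were 0x5000, then cnt_split = 2 and length_mod = 0x1000,
+ * so the loop would issue chunks of 0x2000, 0x2000 and 0x1000 dwords. GRC
+ * offsets advance in dwords, host offsets in bytes (offset * 4).
+ */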
+
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = flags;
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ grc_addr_in_dw,
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ ECORE_DMAE_ADDRESS_GRC,
+ size_in_dwords, &params);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
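+
+/* Illustrative call (the buffer, GRC offset and sizes below are assumptions,
+ * not taken from this file): copying 16 dwords from a host-virtual buffer
+ * into the GRC might look like:
+ *
+ *   u32 buf[16];
+ *   enum _ecore_status_t rc;
+ *
+ *   rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)buf,
+ *                            grc_addr, 16, 0);
+ */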
+
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = flags;
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
+ dest_addr, ECORE_DMAE_ADDRESS_GRC,
+ ECORE_DMAE_ADDRESS_HOST_VIRT,
+ size_in_dwords, &params);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct ecore_dmae_params *p_params)
+{
+ enum _ecore_status_t rc;
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+ rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ dest_addr,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ ECORE_DMAE_ADDRESS_HOST_PHYS,
+ size_in_dwords, p_params);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union ecore_qm_pq_params *p_params)
+{
+ u16 pq_id = 0;
+
+ if ((proto == PROTOCOLID_CORE ||
+ proto == PROTOCOLID_ETH) && !p_params) {
+ DP_NOTICE(p_hwfn, true,
+ "Protocol %d received NULL PQ params\n", proto);
+ return 0;
+ }
+
+ switch (proto) {
+ case PROTOCOLID_CORE:
+ if (p_params->core.tc == LB_TC)
+ pq_id = p_hwfn->qm_info.pure_lb_pq;
+ else if (p_params->core.tc == OOO_LB_TC)
+ pq_id = p_hwfn->qm_info.ooo_pq;
+ else
+ pq_id = p_hwfn->qm_info.offload_pq;
+ break;
+ case PROTOCOLID_ETH:
+ pq_id = p_params->eth.tc;
+ /* TODO - multi-CoS for VFs? */
+ if (p_params->eth.is_vf)
+ pq_id += p_hwfn->qm_info.vf_queues_offset +
+ p_params->eth.vf_id;
+ break;
+ default:
+ pq_id = 0;
+ }
+
+ pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ);
+
+ return pq_id;
+}
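+
+/* Illustrative use (values are assumptions): to resolve the PQ for an ETH
+ * queue on TC 0 of the PF itself, one might do:
+ *
+ *   union ecore_qm_pq_params pq_params;
+ *
+ *   OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+ *   pq_params.eth.tc = 0;
+ *   pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ */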
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type)
+{
+ /* Fan failure cannot be masked by handling of another HW error */
+ if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
+ "Recovery is in progress."
+ "Avoid notifying about HW error %d.\n",
+ err_type);
+ return;
+ }
+
+ OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
+}
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
new file mode 100644
index 00000000..89499447
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HW_H__
+#define __ECORE_HW_H__
+
+#include "ecore.h"
+#include "ecore_dev_api.h"
+
+/* Forward declaration */
+struct ecore_ptt;
+
+enum reserved_ptts {
+ RESERVED_PTT_EDIAG,
+ RESERVED_PTT_USER_SPACE,
+ RESERVED_PTT_MAIN,
+ RESERVED_PTT_DPC,
+ RESERVED_PTT_MAX
+};
+
+/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1
+ * instead of 0; this should be fixed in later HW versions.
+ */
+#ifndef MISC_REG_DRIVER_CONTROL_0
+#define MISC_REG_DRIVER_CONTROL_0 MISC_REG_DRIVER_CONTROL_1
+#endif
+#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE
+#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE
+#endif
+
+enum _dmae_cmd_dst_mask {
+ DMAE_CMD_DST_MASK_NONE = 0,
+ DMAE_CMD_DST_MASK_PCIE = 1,
+ DMAE_CMD_DST_MASK_GRC = 2
+};
+
+enum _dmae_cmd_src_mask {
+ DMAE_CMD_SRC_MASK_PCIE = 0,
+ DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+enum _dmae_cmd_crc_mask {
+ DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+ DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE 0x1
+
+#ifdef __BIG_ENDIAN
+#define DMAE_COMPLETION_VAL 0xAED10000
+#define DMAE_CMD_ENDIANITY 0x3
+#else
+#define DMAE_COMPLETION_VAL 0xD1AE
+#define DMAE_CMD_ENDIANITY 0x2
+#endif
+
+#define DMAE_CMD_SIZE 14
+/* size of the DMAE command structure to fill: DMAE_CMD_SIZE - 5 */
+#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
+/* minimum wait for a DMAE operation to complete: 2 milliseconds */
+#define DMAE_MIN_WAIT_TIME 0x2
+#define DMAE_MAX_CLIENTS 32
+
+/**
+ * @brief ecore_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t - success (0), negative - error.
+ */
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_free - Free the PTT pool
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_get_bar_addr - Get the PTT's external BAR address
+ *
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param new_hw_addr
+ */
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 new_hw_addr);
+
+/**
+ * @brief ecore_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct ecore_ptt *
+ */
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+/**
+ * @brief ecore_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 hw_addr, u32 val);
+
+/**
+ * @brief ecore_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ *
+ * @return u32
+ */
+u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr);
+
+/**
+ * @brief ecore_memcpy_from - copy n bytes from BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *dest, u32 hw_addr, osal_size_t n);
+
+/**
+ * @brief ecore_memcpy_to - copy n bytes to BAR using the given
+ * ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr, void *src, osal_size_t n);
+/**
+ * @brief ecore_fid_pretend - pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of the pxp_pretend structure. Can contain
+ * either a PF or a VF; the port/path fields are don't-care.
+ */
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 fid);
+
+/**
+ * @brief ecore_port_pretend - pretend to another port when
+ * accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port_id);
+
+/**
+ * @brief ecore_port_unpretend - cancel any previously set port
+ * pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_vfid_to_concrete - build a concrete FID for a
+ * given VF ID
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
+
+/**
+ * @brief ecore_dmae_info_alloc - Init the dmae_info structure,
+ * which is part of p_hwfn.
+ *
+ * @param p_hwfn
+ */
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_dmae_info_free - Free the dmae_info structure,
+ * which is part of p_hwfn.
+ *
+ * @param p_hwfn
+ */
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
+
+union ecore_qm_pq_params {
+ struct {
+ u8 tc;
+ } core;
+
+ struct {
+ u8 is_vf;
+ u8 vf_id;
+ u8 tc;
+ } eth;
+};
+
+u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto, union ecore_qm_pq_params *params);
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+ const u8 *fw_data);
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type);
+
+#endif /* __ECORE_HW_H__ */
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
new file mode 100644
index 00000000..fa518cec
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hw_defs.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _ECORE_IGU_DEF_H_
+#define _ECORE_IGU_DEF_H_
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
+
+/* IGU control commands */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
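+
+/* Illustrative sketch (not part of this header; opaque_fid and pxp_addr are
+ * placeholder variables): a command word for a read could be composed with
+ * the generic SET_FIELD helper, e.g.:
+ *
+ *   struct igu_ctrl_reg ctrl = { 0 };
+ *
+ *   SET_FIELD(ctrl.ctrl_data, IGU_CTRL_REG_FID, opaque_fid);
+ *   SET_FIELD(ctrl.ctrl_data, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ *   SET_FIELD(ctrl.ctrl_data, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_RD);
+ */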
+
+#endif
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
new file mode 100644
index 00000000..5324e052
--- /dev/null
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -0,0 +1,1275 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hw.h"
+#include "ecore_init_ops.h"
+#include "reg_addr.h"
+#include "ecore_rt_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_tools.h"
+#include "ecore_init_fw_funcs.h"
+
+/* @DPDK CmInterfaceEnum */
+enum cm_interface_enum {
+ MCM_SEC,
+ MCM_PRI,
+ UCM_SEC,
+ UCM_PRI,
+ TCM_SEC,
+ TCM_PRI,
+ YCM_SEC,
+ YCM_PRI,
+ XCM_SEC,
+ XCM_PRI,
+ NUM_OF_CM_INTERFACES
+};
+/* general constants */
+#define QM_PQ_MEM_4KB(pq_size) \
+(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) \
+(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID 0xffff
+/* feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND 62500000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL 43750000
+/* RL constants */
+#define QM_RL_UPPER_BOUND 62500000
+#define QM_RL_PERIOD 5
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate) \
+OSAL_MAX_T(u32, (((rate ? rate : 1000000) * QM_RL_PERIOD * 1.01) / 8), 1)
+#define QM_RL_MAX_INC_VAL 43750000
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+#define EAGLE_WORKAROUND_TC 7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES 150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8 /* eagle workaround CmdQ */
+#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
+(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
+- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
+(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS /* headroom per-port */
+#define BTB_EAGLE_WORKAROUND_BLOCKS 4 /* eagle workaround blocks */
+#define BTB_PURE_LB_FACTOR 10
+#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 0x2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
+((port) * (max_phys_tcs_per_port) + (tc))
+#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phys_tcs_per_port) \
+((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
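+/* Example (numbers chosen for illustration): with max_phys_tcs_per_port = 4,
+ * VOQ(1, 2, 4) selects physical VOQ 1 * 4 + 2 = 6, while any tc >= LB_TC
+ * selects the port's loopback VOQ, LB_VOQ(1) = MAX_PHYS_VOQS + 1.
+ */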
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ if (pf_rl_en) {
+ /* enable RLs for all VOQs */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (1 << MAX_NUM_VOQS) - 1);
+ /* write RL period */
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (pf_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+ vport_rl_en ? 1 : 0);
+ if (vport_rl_en) {
+ /* write RL period (use timer 0 only) */
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ /* set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ QM_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+ vport_wfq_en ? 1 : 0);
+ /* set credit threshold for QM bypass flow */
+ if (vport_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+ QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 voq, u16 cmdq_lines)
+{
+ u32 qm_line_crd;
+ bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
+ if (is_bb_a0)
+ cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
+ qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+ qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u8 tc, voq, port_id;
+ bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
+ /* clear PBF lines for all VOQs */
+ for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (port_params[port_id].active) {
+ u16 phys_lines, phys_lines_per_tc;
+ phys_lines =
+ port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+ if (eagle_workaround)
+ phys_lines -= PBF_CMDQ_EAGLE_WORKAROUND_LINES;
+ /* find #lines per active physical TC */
+ phys_lines_per_tc =
+ phys_lines /
+ port_params[port_id].num_active_phys_tcs;
+ /* init registers per active TC */
+ for (tc = 0;
+ tc < port_params[port_id].num_active_phys_tcs;
+ tc++) {
+ voq =
+ PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
+ }
+ /* init registers for pure LB TC */
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
+ /* init registers for eagle workaround */
+ if (eagle_workaround) {
+ voq =
+ PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ PBF_CMDQ_EAGLE_WORKAROUND_LINES);
+ }
+ }
+ }
+}
+
+/*
+ * Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ * B - BTB blocks for this port
+ * C - number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per-port
+ * headroom
+ * b. B = B - 38 (remainder after global headroom allocation)
+ * c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space is
+ * not enabled and allocated for each TC.
+ */
+static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u8 tc, voq, port_id;
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (port_params[port_id].active) {
+ /* subtract headroom blocks */
+ usable_blocks =
+ port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+ if (eagle_workaround)
+ usable_blocks -= BTB_EAGLE_WORKAROUND_BLOCKS;
+ pure_lb_blocks =
+ (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (port_params[port_id].num_active_phys_tcs *
+ BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
+ pure_lb_blocks =
+ OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks / BTB_PURE_LB_FACTOR);
+ phys_blocks =
+ (usable_blocks -
+ pure_lb_blocks) /
+ port_params[port_id].num_active_phys_tcs;
+ /* init physical TCs */
+ for (tc = 0;
+ tc < port_params[port_id].num_active_phys_tcs;
+ tc++) {
+ voq =
+ PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ phys_blocks);
+ }
+ /* init pure LB TC */
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ
+ (port_id)),
+ pure_lb_blocks);
+ /* init eagle workaround */
+ if (eagle_workaround) {
+ voq =
+ PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+ BTB_EAGLE_WORKAROUND_BLOCKS);
+ }
+ }
+ }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_first_pf,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u32 base_mem_addr_4kb,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params)
+{
+ u16 i, pq_id, pq_group;
+ u16 num_pqs = num_pf_pqs + num_vf_pqs;
+ u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
+ bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
+ /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
+ u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+ /* set mapping from PQ group to PF */
+ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+ QM_PQ_SIZE_256B(num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+ QM_PQ_SIZE_256B(num_vf_cids));
+ /* go over all Tx PQs */
+ for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
+ struct qm_rf_pq_map tx_pq_map;
+ u8 voq =
+ VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ bool is_vf_pq = (i >= num_pf_pqs);
+ /* update first Tx PQ of VPORT/TC */
+ u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
+ u16 first_tx_pq_id =
+ vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
+ tc_id];
+ if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ /* create new VP PQ */
+ vport_params[vport_id_in_pf].
+ first_tx_pq_id[pq_params[i].tc_id] = pq_id;
+ first_tx_pq_id = pq_id;
+ /* map VP PQ to VOQ and PF */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
+ (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
+ QM_WFQ_VP_PQ_PF_SHIFT));
+ }
+ /* fill PQ map entry */
+ OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+ is_vf_pq ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+ is_vf_pq ? pq_params[i].vport_id : 0);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+ SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+ pq_params[i].wrr_group);
+ /* write PQ map entry to CAM */
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+ *((u32 *)&tx_pq_map));
+ /* set base address */
+ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ /* check if VF PQ */
+ if (is_vf_pq) {
+ tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+ (1 << (pq_id % tx_pq_vf_mask_width));
+ mem_addr_4kb += vport_pq_mem_4kb;
+ } else {
+ mem_addr_4kb += pq_mem_4kb;
+ }
+ }
+ /* store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++) {
+ if (tx_pq_vf_mask[i]) {
+ if (is_bb_a0) {
+ u32 curr_mask =
+ is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
+ QM_REG_MAXPQSIZETXSEL_0
+ + i * 4);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+ i, curr_mask | tx_pq_vf_mask[i]);
+ } else
+ STORE_RT_REG(p_hwfn,
+ QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+ i, tx_pq_vf_mask[i]);
+ }
+ }
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_tids, u32 base_mem_addr_4kb)
+{
+ u16 i, pq_id;
+ u16 pq_group = pf_id;
+ u32 pq_size = num_pf_cids + num_tids;
+ u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ u32 mem_addr_4kb = base_mem_addr_4kb;
+ /* map PQ group to PF */
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+ /* set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+ QM_PQ_SIZE_256B(pq_size));
+ /* set base address */
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+ i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+ mem_addr_4kb += pq_mem_4kb;
+ }
+}
+
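+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */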
+static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 pf_id,
+ u16 pf_wfq,
+ u8 max_phys_tcs_per_port,
+ u16 num_tx_pqs,
+ struct init_qm_pq_params *pq_params)
+{
+ u16 i;
+ u32 inc_val;
+ u32 crd_reg_offset =
+ (pf_id <
+ MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
+ inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+ for (i = 0; i < num_tx_pqs; i++) {
+ u8 voq =
+ VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
+ return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF. Return -1 on error */
+static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+ return 0;
+}
+
+static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 tc, i;
+ u32 inc_val;
+ /* go over all PF VPORTs */
+ for (i = 0; i < num_vports; i++) {
+ if (vport_params[i].vport_wfq) {
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight config");
+ return -1;
+ }
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id =
+ vport_params[i].first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET
+ + vport_pq_id, inc_val);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/* Prepare VPORT RL runtime init values for the specified VPORTs. Return -1 on error. */
+static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 start_vport,
+ u8 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 i, vport_id;
+ /* go over all PF VPORTs */
+ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+ u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT rate-limit configuration");
+ return -1;
+ }
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+ QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+ QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+ inc_val);
+ }
+ return 0;
+}
+
+static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 reg_val, i;
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+ i++) {
+ OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
+ reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+ }
+ /* check for a timeout while waiting for the SDM command-ready signal */
+ if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Timeout waiting for QM SDM cmd ready signal\n");
+ return false;
+ }
+ return true;
+}
+
+static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+{
+ if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+ return false;
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+ return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
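+
+/* The SDM command flow above: wait until the QM reports ready, program the
+ * command address and 64-bit data, pulse the GO register, then poll ready
+ * again to confirm the command was consumed.
+ */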
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+u32 ecore_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+{
+ return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
+
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ bool pf_rl_en,
+ bool pf_wfq_en,
+ bool vport_rl_en,
+ bool vport_wfq_en,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS])
+{
+ u8 port_id;
+ /* init AFullOprtnstcCrdMask */
+ u32 mask =
+ (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+ /* check eagle workaround */
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (port_params[port_id].active &&
+ port_params[port_id].num_active_phys_tcs >
+ EAGLE_WORKAROUND_TC &&
+ ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't config 8 TCs with Eagle"
+ " eng1 workaround");
+ return -1;
+ }
+ }
+ /* enable/disable PF RL */
+ ecore_enable_pf_rl(p_hwfn, pf_rl_en);
+ /* enable/disable PF WFQ */
+ ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
+ /* enable/disable VPORT RL */
+ ecore_enable_vport_rl(p_hwfn, vport_rl_en);
+ /* enable/disable VPORT WFQ */
+ ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
+ /* init PBF CMDQ line credit */
+ ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
+ max_phys_tcs_per_port, port_params);
+ /* init BTB blocks in PBF */
+ ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
+ max_phys_tcs_per_port, port_params);
+ return 0;
+}
+
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_first_pf,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params)
+{
+ u8 tc, i;
+ u32 other_mem_size_4kb =
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+ /* clear first Tx PQ ID array for each VPORT */
+ for (i = 0; i < num_vports; i++)
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+ /* map Other PQs (if any) */
+#if QM_OTHER_PQS_PER_PF > 0
+ ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
+ num_tids, 0);
+#endif
+ /* map Tx PQs */
+ ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
+ max_phys_tcs_per_port, is_first_pf, num_pf_cids,
+ num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
+ start_vport, other_mem_size_4kb, pq_params,
+ vport_params);
+ /* init PF WFQ */
+ if (pf_wfq)
+ if (ecore_pf_wfq_rt_init
+ (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
+ num_pf_pqs + num_vf_pqs, pq_params) != 0)
+ return -1;
+ /* init PF RL */
+ if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
+ return -1;
+ /* set VPORT WFQ */
+ if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
+ return -1;
+ /* set VPORT RL */
+ if (ecore_vport_rl_rt_init
+ (p_hwfn, start_vport, num_vports, vport_params) != 0)
+ return -1;
+ return 0;
+}
+
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+ ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+ return 0;
+}
+
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+ return -1;
+ }
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+ return 0;
+}
+
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+{
+ u8 tc;
+ u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight configuration");
+ return -1;
+ }
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ ecore_wr(p_hwfn, p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
+ }
+ }
+ return 0;
+}
+
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ if (inc_val > QM_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT rate-limit configuration");
+ return -1;
+ }
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
+ QM_RL_CRD_REG_SIGN_BIT);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+ return 0;
+}
+
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs)
+{
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+ /* set command's PQ type */
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+ /* go over requested PQs */
+ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+ /* set PQ bit in mask (stop command only) */
+ if (!is_release_cmd)
+ pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+ /* if last PQ or end of PQ mask, write command */
+ if ((pq_id == last_pq) ||
+ (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
+ pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
+ pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!ecore_send_qm_cmd
+ (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
+ cmd_arr[1]))
+ return false;
+ pq_mask = 0;
+ }
+ }
+ return true;
+}
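+
+/* Illustrative pairing (arguments are assumptions): a range of Tx PQs is
+ * typically stopped and later released over the same range, e.g.:
+ *
+ *   ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, start_pq, num_pqs);
+ *   ...
+ *   ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
+ */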
+
+/* NIG: ETS configuration constants */
+#define NIG_TX_ETS_CLIENT_OFFSET 4
+#define NIG_LB_ETS_CLIENT_OFFSET 1
+#define NIG_ETS_MIN_WFQ_BYTES 1600
+/* NIG: ETS constants */
+#define NIG_ETS_UP_BOUND(weight, mtu) \
+(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+/* NIG: RL constants */
+#define NIG_RL_BASE_TYPE 1 /* byte base type */
+#define NIG_RL_PERIOD 1 /* in us */
+#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
+#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
+#define NIG_RL_MAX_VAL(inc_val, mtu) \
+(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+/* NIG: packet priority configuration constants */
+#define NIG_PRIORITY_MAP_TC_BITS 4
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req, bool is_lb)
+{
+ u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
+ u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
+ u8 tc_client_offset =
+ is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
+ u32 min_weight = 0xffffffff;
+ u32 tc_weight_base_addr =
+ is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ u32 tc_weight_addr_diff =
+ is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ u32 tc_bound_base_addr =
+ is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ u32 tc_bound_addr_diff =
+ is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ for (tc = 0; tc < num_tc; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+ /* update SP map */
+ if (tc_req->use_sp)
+ sp_tc_map |= (1 << tc);
+ if (tc_req->use_wfq) {
+ /* update WFQ map */
+ wfq_tc_map |= (1 << tc);
+ /* find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
+ }
+ }
+ /* write SP map */
+ ecore_wr(p_hwfn, p_ptt,
+ is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
+ NIG_REG_TX_ARB_CLIENT_IS_STRICT,
+ (sp_tc_map << tc_client_offset));
+ /* write WFQ map */
+ ecore_wr(p_hwfn, p_ptt,
+ is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ (wfq_tc_map << tc_client_offset));
+ /* write WFQ weights */
+ for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+ if (tc_req->use_wfq) {
+ /* translate weight to bytes */
+ u32 byte_weight =
+ (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+ /* write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt,
+ tc_weight_base_addr +
+ tc_weight_addr_diff * tc_client_offset,
+ byte_weight);
+ /* write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt,
+ tc_bound_base_addr +
+ tc_bound_addr_diff * tc_client_offset,
+ NIG_ETS_UP_BOUND(byte_weight, req->mtu));
+ }
+ }
+}
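+
+/* For illustration: the translation above maps the smallest WFQ weight to
+ * NIG_ETS_MIN_WFQ_BYTES (1600 bytes) and scales the rest proportionally,
+ * e.g. weights {1, 3} become {1600, 4800} bytes.
+ */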
+
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_lb_rl_req *req)
+{
+ u8 tc;
+ u32 ctrl, inc_val, reg_offset;
+ /* disable global MAC+LB RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+ /* configure and enable global MAC+LB RL */
+ if (req->lb_mac_rate) {
+ /* configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
+ NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
+ inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
+ NIG_RL_MAX_VAL(inc_val, req->mtu));
+ /* enable */
+ ctrl |=
+ 1 <<
+ NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+ }
+ /* disable global LB-only RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+ /* configure and enable global LB-only RL */
+ if (req->lb_rate) {
+ /* configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
+ NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->lb_rate);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
+ inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
+ NIG_RL_MAX_VAL(inc_val, req->mtu));
+ /* enable */
+ ctrl |=
+ 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+ }
+ /* per-TC RLs */
+ for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
+ tc++, reg_offset += 4) {
+ /* disable TC RL */
+ ctrl =
+ NIG_RL_BASE_TYPE <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
+ /* configure and enable TC RL */
+ if (req->tc_rate[tc]) {
+ /* configure */
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
+ reg_offset, NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
+ reg_offset, inc_val);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
+ reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
+ /* enable */
+ ctrl |=
+ 1 <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
+ ctrl);
+ }
+ }
+}
+
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_pri_tc_map_req *req)
+{
+ u8 pri, tc;
+ u32 pri_tc_mask = 0;
+ u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
+ for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
+ if (req->pri[pri].valid) {
+ pri_tc_mask |=
+ (req->pri[pri].
+ tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
+ tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
+ }
+ }
+ /* write priority -> TC mask */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
+ /* write TC -> priority mask */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
+ tc_pri_mask[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
+ tc_pri_mask[tc]);
+ }
+}
+
+/* PRS: ETS configuration constants */
+#define PRS_ETS_MIN_WFQ_BYTES 1600
+#define PRS_ETS_UP_BOUND(weight, mtu) \
+(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_ets_req *req)
+{
+ u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
+ u32 min_weight = 0xffffffff;
+ u32 tc_weight_addr_diff =
+ PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+ u32 tc_bound_addr_diff =
+ PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
+ PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+ /* update SP map */
+ if (tc_req->use_sp)
+ sp_tc_map |= (1 << tc);
+ if (tc_req->use_wfq) {
+ /* update WFQ map */
+ wfq_tc_map |= (1 << tc);
+ /* find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
+ }
+ }
+ /* write SP map */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
+ /* write WFQ map */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
+ wfq_tc_map);
+ /* write WFQ weights */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+ if (tc_req->use_wfq) {
+ /* translate weight to bytes */
+ u32 byte_weight =
+ (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+ /* write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
+ tc * tc_weight_addr_diff, byte_weight);
+ /* write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
+ tc * tc_bound_addr_diff,
+ PRS_ETS_UP_BOUND(byte_weight, req->mtu));
+ }
+ }
+}
+
+/* BRB: RAM configuration constants */
+#define BRB_TOTAL_RAM_BLOCKS_BB 4800
+#define BRB_TOTAL_RAM_BLOCKS_K2 5632
+#define BRB_BLOCK_SIZE 128 /* in bytes */
+#define BRB_MIN_BLOCKS_PER_TC 9
+#define BRB_HYST_BYTES 10240
+#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
+/*
+ * temporary big RAM allocation - should be updated
+ */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
+{
+ u8 port, active_ports = 0;
+ u32 active_port_blocks, reg_offset = 0;
+ u32 tc_headroom_blocks =
+ (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
+ u32 min_pkt_size_blocks =
+ (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
+ u32 total_blocks =
+ ECORE_IS_K2(p_hwfn->
+ p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
+ BRB_TOTAL_RAM_BLOCKS_BB;
+ /* find number of active ports */
+ for (port = 0; port < MAX_NUM_PORTS; port++)
+ if (req->num_active_tcs[port])
+ active_ports++;
+ active_port_blocks = (u32)(total_blocks / active_ports);
+ for (port = 0; port < req->max_ports_per_engine; port++) {
+ /* calculate per-port sizes */
+ u32 tc_guaranteed_blocks =
+ (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
+ u32 port_blocks =
+ req->num_active_tcs[port] ? active_port_blocks : 0;
+ u32 port_guaranteed_blocks =
+ req->num_active_tcs[port] * tc_guaranteed_blocks;
+ u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
+ u32 full_xoff_th =
+ req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
+ u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
+ u32 pause_xoff_th = tc_headroom_blocks;
+ u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+ u8 tc;
+ /* init total size per port */
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
+ port_blocks);
+ /* init shared size per port */
+ ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
+ port_shared_blocks);
+ for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
+ /* clear init values for non-active TCs */
+ if (tc == req->num_active_tcs[port]) {
+ tc_guaranteed_blocks = 0;
+ full_xoff_th = 0;
+ full_xon_th = 0;
+ pause_xoff_th = 0;
+ pause_xon_th = 0;
+ }
+ /* init guaranteed size per TC */
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_TC_GUARANTIED_0 + reg_offset,
+ tc_guaranteed_blocks);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
+ BRB_HYST_BLOCKS);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
+ reg_offset, full_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
+ reg_offset, full_xon_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
+ reg_offset, pause_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
+ reg_offset, pause_xon_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
+ reg_offset, full_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
+ reg_offset, full_xon_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
+ reg_offset, pause_xoff_th);
+ ecore_wr(p_hwfn, p_ptt,
+ BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
+ reg_offset, pause_xon_th);
+ }
+ }
+}
+
+/* In MF mode, should be called once per engine to set the EtherType of the outer tag */
+void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 eth_type)
+{
+ /* update PRS register */
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+ /* update NIG register */
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+ /* update PBF register */
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+}
+
+/* In MF mode, should be called once per port to set the EtherType of the outer tag */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 eth_type)
+{
+ /* update DORQ register */
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, eth_type);
+}
+
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
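+/* For illustration: SET_TUNNEL_TYPE_ENABLE_BIT(val, 3, true) sets bit 3 of
+ * val and SET_TUNNEL_TYPE_ENABLE_BIT(val, 3, false) clears it; the helpers
+ * below use it to toggle per-tunnel-type enable bits in the PRS/NIG
+ * registers.
+ */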
+#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port)
+{
+ /* update PRS register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+ /* update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+ /* update PBF register */
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool vxlan_enable)
+{
+ u32 reg_val;
+ /* update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
+ vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+ }
+ /* update NIG register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
+ vxlan_enable);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+ /* update DORQ register */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+ vxlan_enable ? 1 : 0);
+}
+
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ u32 reg_val;
+ /* update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
+ eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
+ ip_gre_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+ }
+ /* update NIG register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
+ eth_gre_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
+ ip_gre_enable);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+ /* update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+ eth_gre_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+ ip_gre_enable ? 1 : 0);
+}
+
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port)
+{
+ /* geneve tunnel not supported in BB_A0 */
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+ return;
+ /* update PRS register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+ /* update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+ /* update PBF register */
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable)
+{
+ u32 reg_val;
+ /* geneve tunnel not supported in BB_A0 */
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+ return;
+ /* update PRS register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
+ eth_geneve_enable);
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
+ ip_geneve_enable);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+ }
+ /* update NIG register */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+ eth_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
+ ip_geneve_enable ? 1 : 0);
+ /* comp ver */
+ reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+ ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+ /* EDPM with geneve tunnel not supported in BB_B0 */
+ if (ECORE_IS_BB_B0(p_hwfn->p_dev))
+ return;
+ /* update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+ eth_geneve_enable ? 1 : 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ ip_geneve_enable ? 1 : 0);
+}
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
new file mode 100644
index 00000000..5280cd7a
--- /dev/null
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _INIT_FW_FUNCS_H
+#define _INIT_FW_FUNCS_H
+/* forward declarations */
+struct init_qm_pq_params;
+/**
+ * @brief ecore_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 ecore_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
+/**
+ * @brief ecore_qm_common_rt_init -
+ * Prepare QM runtime init values for the engine phase
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param port_params - array of size MAX_NUM_PORTS with parameters for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ bool pf_rl_en,
+ bool pf_wfq_en,
+ bool vport_rl_en,
+ bool vport_wfq_en,
+ struct init_qm_port_params
+ port_params[MAX_NUM_PORTS]);
+
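+/**
+ * @brief ecore_qm_pf_rt_init -
+ * Prepare QM runtime init values for the PF phase
+ *
+ * @return 0 on success, -1 on error.
+ */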
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_first_pf,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params);
+/**
+ * @brief ecore_init_pf_wfq - Initializes the WFQ weight of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
+/**
+ * @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl);
+/**
+ * @brief ecore_init_vport_wfq - Initializes the WFQ weight of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * ecore_qm_pf_rt_init
+ * @param vport_wfq - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+/**
+ * @brief ecore_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+/**
+ * @brief ecore_send_qm_stop_cmd - Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if a timeout occurred while
+ * waiting for the QM command to complete.
+ */
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs);
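+/* Usage sketch (illustrative, error handling elided): Tx PQs are first
+ * stopped, then released once drained:
+ *
+ *	if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ *				    start_pq, num_pqs))
+ *		return ECORE_TIMEOUT;
+ *	... wait for the PQs to drain ...
+ *	ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
+ */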
+/**
+ * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the NIG ETS initialization requirements.
+ * @param is_lb - if set, the loopback port arbiter is initialized, otherwise
+ * the physical port arbiter is initialized. The pure-LB TC
+ * requirements are ignored when is_lb is cleared.
+ */
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req, bool is_lb);
+/**
+ * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
+ *
+ * Based on global and per-TC rate requirements
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the NIG LB RLs initialization requirements.
+ */
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_lb_rl_req *req);
+/**
+ * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
+ *
+ * Assumes valid arguments.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - required mapping from priorities to TCs.
+ */
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_nig_pri_tc_map_req *req);
+/**
+ * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the PRS ETS initialization requirements.
+ */
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_ets_req *req);
+/**
+ * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param req - the BRB RAM initialization requirements.
+ */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_brb_ram_req *req);
+/**
+ * @brief ecore_set_engine_mf_ovlan_eth_type - initializes the NIG, PRS, PBF
+ * and LLH ethType registers to the input ethType.
+ * Should be called once per engine if the engine is in BD mode.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_type - etherType to configure
+ */
+void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 eth_type);
+/**
+ * @brief ecore_set_port_mf_ovlan_eth_type - initializes the DORQ ethType
+ * registers to the input ethType.
+ * Should be called once per port.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_type - etherType to configure
+ */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 eth_type);
+/**
+ * @brief ecore_set_vxlan_dest_port - init vxlan tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port);
+/**
+ * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool vxlan_enable);
+/**
+ * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable);
+/**
+ * @brief ecore_set_geneve_dest_port - init geneve tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 dest_port);
+/**
+ * @brief ecore_set_geneve_enable - enable or disable GENEVE tunnels in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable);
+#endif
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
new file mode 100644
index 00000000..326eb926
--- /dev/null
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/* include the precompiled configuration values - only once */
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_status.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_fw_funcs.h"
+
+#include "ecore_iro_values.h"
+#include "ecore_sriov.h"
+#include "ecore_gtt_values.h"
+#include "reg_addr.h"
+#include "ecore_init_ops.h"
+
+#define ECORE_INIT_MAX_POLL_COUNT 100
+#define ECORE_INIT_POLL_PERIOD_US 500
+
+void ecore_init_iro_array(struct ecore_dev *p_dev)
+{
+ p_dev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+ p_hwfn->rt_data.b_valid[i] = false;
+}
+
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
+{
+ p_hwfn->rt_data.init_val[rt_offset] = val;
+ p_hwfn->rt_data.b_valid[rt_offset] = true;
+}
+
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset, u32 *p_val, osal_size_t size)
+{
+ osal_size_t i;
+
+ for (i = 0; i < size / sizeof(u32); i++) {
+ p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+ p_hwfn->rt_data.b_valid[rt_offset + i] = true;
+ }
+}
+
+static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr,
+ u16 rt_offset,
+ u16 size, bool b_must_dmae)
+{
+ u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+ bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 i, segment;
+
+ /* Since not all RT entries are initialized, go over the RT and
+ * for each segment of initialized values use DMA.
+ */
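+ /* Example (illustrative): with b_valid = {1,1,0,1} and size = 4, when
+ * b_must_dmae is set, entries 0-1 go out as one DMA segment, index 2 is
+ * jumped over and entry 3 forms its own segment.
+ */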
+ for (i = 0; i < size; i++) {
+ if (!p_valid[i])
+ continue;
+
+ /* In case there isn't any wide-bus configuration here,
+ * simply write the data instead of using dmae.
+ */
+ if (!b_must_dmae) {
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ continue;
+ }
+
+ /* Start of a new segment */
+ for (segment = 1; i + segment < size; segment++)
+ if (!p_valid[i + segment])
+ break;
+
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)(p_init_val + i),
+ addr + (i << 2), segment, 0);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Jump over the entire segment, including invalid entry */
+ i += segment;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(bool) * RUNTIME_ARRAY_SIZE);
+ if (!rt_data->b_valid)
+ return ECORE_NOMEM;
+
+ rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(u32) * RUNTIME_ARRAY_SIZE);
+ if (!rt_data->init_val) {
+ OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
+ return ECORE_NOMEM;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_init_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
+}
+
+static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size, const u32 *p_buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+#ifndef ASIC_ONLY
+ if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
+ !b_can_dmae || (!b_must_dmae && (size < 16))) {
+#else
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+#endif
+ const u32 *data = p_buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)(p_buf +
+ dmae_data_offset),
+ addr, size, 0);
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 fill,
+ u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+ OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+ return ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (osal_uintptr_t)&zero_buffer[0],
+ addr, fill_count,
+ ECORE_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 fill, u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ ecore_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ u32 offset, output_len, input_len, max_size;
+#endif
+ u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ u32 size, addr, data;
+
+ array_data = p_dev->fw_data->arr_data;
+ data = OSAL_LE32_TO_CPU(cmd->data);
+ addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+
+ hdr = (union init_array_hdr *)
+ (uintptr_t)(array_data + dmae_array_offset);
+ data = OSAL_LE32_TO_CPU(hdr->raw.data);
+ switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+ case INIT_ARR_ZIPPED:
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ offset = dmae_array_offset + 1;
+ input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ max_size = MAX_ZIPPED_SIZE * 4;
+ OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);
+
+ output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
+ (u8 *)(uintptr_t)&array_data[offset],
+ max_size,
+ (u8 *)p_hwfn->unzip_buf);
+ if (output_len) {
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+ output_len,
+ p_hwfn->unzip_buf,
+ b_must_dmae, b_can_dmae);
+ } else {
+ DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
+ rc = ECORE_INVAL;
+ }
+#else
+ DP_NOTICE(p_hwfn, true,
+ "Using zipped firmware without config enabled\n");
+ rc = ECORE_INVAL;
+#endif
+ break;
+ case INIT_ARR_PATTERN:
+ {
+ u32 repeats = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+ u32 i;
+
+ size = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset +
+ 1, size, array_data,
+ b_must_dmae,
+ b_can_dmae);
+ if (rc)
+ break;
+ }
+ break;
+ }
+ case INIT_ARR_STANDARD:
+ size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+/* init_ops write command */
+static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_write_op *p_cmd,
+ bool b_can_dmae)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ bool b_must_dmae;
+ u32 addr, data;
+
+ data = OSAL_LE32_TO_CPU(p_cmd->data);
+ b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+
+ /* Sanitize */
+ if (b_must_dmae && !b_can_dmae) {
+ DP_NOTICE(p_hwfn, true,
+ "Need to write to %08x for Wide-bus but DMAE isn't"
+ " allowed\n",
+ addr);
+ return ECORE_INVAL;
+ }
+
+ switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+ case INIT_SRC_INLINE:
+ data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
+ ecore_wr(p_hwfn, p_ptt, addr, data);
+ break;
+ case INIT_SRC_ZEROS:
+ data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
+ if (b_must_dmae || (b_can_dmae && (data >= 64)))
+ rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ else
+ ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
+ break;
+ case INIT_SRC_ARRAY:
+ rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
+ b_must_dmae, b_can_dmae);
+ break;
+ case INIT_SRC_RUNTIME:
+ ecore_init_rt(p_hwfn, p_ptt, addr,
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
+ b_must_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
+{
+ return (val == expected_val);
+}
+
+static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
+{
+ return (val & expected_val) == expected_val;
+}
+
+static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
+{
+ return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
+static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct init_read_op *cmd)
+{
+ bool (*comp_check)(u32 val, u32 expected_val);
+ u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
+ u32 data, addr, poll;
+ int i;
+
+ data = OSAL_LE32_TO_CPU(cmd->op_data);
+ addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+ poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay *= 100;
+#endif
+
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+
+ if (poll == INIT_POLL_NONE)
+ return;
+
+ switch (poll) {
+ case INIT_POLL_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_POLL_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_POLL_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ cmd->op_data);
+ return;
+ }
+
+ data = OSAL_LE32_TO_CPU(cmd->expected_val);
+ for (i = 0;
+ i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
+ OSAL_UDELAY(delay);
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ }
+
+ if (i == ECORE_INIT_MAX_POLL_COUNT)
+ DP_ERR(p_hwfn,
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x"
+ " Got: %08x (comparsion %08x)]\n",
+ addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
+ OSAL_LE32_TO_CPU(cmd->op_data));
+}
+
+/* init_ops callbacks entry point */
+static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
+{
+ DP_NOTICE(p_hwfn, true,
+ "Currently init values have no need of callbacks\n");
+}
+
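+/* The modes tree is a prefix expression over mode bits. Example
+ * (illustrative; MODE_X/MODE_Y are hypothetical mode-bit indices): a buffer
+ * of { INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + MODE_X,
+ *      INIT_MODE_OP_NOT, MAX_INIT_MODE_OPS + MODE_Y }
+ * evaluates as "MODE_X && !MODE_Y" against the 'modes' bitmap.
+ */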
+static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
+ u16 *p_offset, int modes)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ const u8 *modes_tree_buf;
+ u8 arg1, arg2, tree_val;
+
+ modes_tree_buf = p_dev->fw_data->modes_tree_buf;
+ tree_val = modes_tree_buf[(*p_offset)++];
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
+ case INIT_MODE_OP_OR:
+ arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ return arg1 | arg2;
+ case INIT_MODE_OP_AND:
+ arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ return arg1 & arg2;
+ default:
+ tree_val -= MAX_INIT_MODE_OPS;
+ return (modes & (1 << tree_val)) ? 1 : 0;
+ }
+}
+
+static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
+ struct init_if_mode_op *p_cmd, int modes)
+{
+ u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
+
+ if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
+ return 0;
+ else
+ return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+ INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
+ struct init_if_phase_op *p_cmd,
+ u32 phase, u32 phase_id)
+{
+ u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
+
+ if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+ (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+ GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+ return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+ INIT_IF_PHASE_OP_CMD_OFFSET);
+ else
+ return 0;
+}
+
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int phase, int phase_id, int modes)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 cmd_num, num_init_ops;
+ union init_op *init_ops;
+ bool b_dmae = false;
+
+ num_init_ops = p_dev->fw_data->init_ops_size;
+ init_ops = p_dev->fw_data->init_ops;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ MAX_ZIPPED_SIZE * 4);
+ if (!p_hwfn->unzip_buf) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
+ return ECORE_NOMEM;
+ }
+#endif
+
+ for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+ union init_op *cmd = &init_ops[cmd_num];
+ u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
+
+ switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+ case INIT_OP_WRITE:
+ rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+ b_dmae);
+ break;
+
+ case INIT_OP_READ:
+ ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+ break;
+
+ case INIT_OP_IF_MODE:
+ cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
+ modes);
+ break;
+ case INIT_OP_IF_PHASE:
+ cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ phase, phase_id);
+ b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+ break;
+ case INIT_OP_DELAY:
+ /* ecore_init_run is always invoked from
+ * a sleepable context
+ */
+ OSAL_UDELAY(cmd->delay.delay);
+ break;
+
+ case INIT_OP_CALLBACK:
+ ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
+#endif
+ return rc;
+}
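+
+/* Usage sketch (illustrative): the HW init flow runs the sequence once per
+ * phase, roughly as
+ *	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, modes);
+ *	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, port_id, modes);
+ *	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, modes);
+ * where 'modes' is the hw-mode bitmap consumed by INIT_OP_IF_MODE commands.
+ */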
+
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
+{
+ u32 gtt_base;
+ u32 i;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ /* This is done by MFW on ASIC; regardless, this should only
+ * be done once per chip [i.e., common]. Implementation is
+ * not too bright, but it should work on the simple FPGA/EMUL
+ * scenarios.
+ */
+ bool initialized = false; /* @DPDK */
+ int poll_cnt = 500;
+ u32 val;
+
+ /* initialize PTT/GTT (poll for completion) */
+ if (!initialized) {
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_START_INIT_PTT_GTT, 1);
+ initialized = true;
+ }
+
+ do {
+ /* ptt might be overridden by HW until this is done */
+ OSAL_UDELAY(10);
+ ecore_ptt_invalidate(p_hwfn);
+ val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INIT_DONE_PTT_GTT);
+ } while ((val != 1) && --poll_cnt);
+
+ if (!poll_cnt)
+ DP_ERR(p_hwfn,
+ "PGLUE_B_REG_INIT_DONE didn't complete\n");
+ }
+#endif
+
+ /* Set the global windows */
+ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+ for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
+ if (pxp_global_win[i])
+ REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+ pxp_global_win[i]);
+}
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+ const u8 *data)
+{
+ struct ecore_fw_data *fw = p_dev->fw_data;
+
+#ifdef CONFIG_ECORE_BINARY_FW
+ struct bin_buffer_hdr *buf_hdr;
+ u32 offset, len;
+
+ if (!data) {
+ DP_NOTICE(p_dev, true, "Invalid fw data\n");
+ return ECORE_INVAL;
+ }
+
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
+
+ offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+ fw->init_ops = (union init_op *)((uintptr_t)(data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+ fw->arr_data = (u32 *)((uintptr_t)(data + offset));
+
+ offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+ fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
+ len = buf_hdr[BIN_BUF_INIT_CMD].length;
+ fw->init_ops_size = len / sizeof(struct init_raw_op);
+#else
+ fw->init_ops = (union init_op *)init_ops;
+ fw->arr_data = (u32 *)init_val;
+ fw->modes_tree_buf = (u8 *)modes_tree_buf;
+ fw->init_ops_size = init_ops_size;
+#endif
+
+ return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
new file mode 100644
index 00000000..8a6fce41
--- /dev/null
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INIT_OPS__
+#define __ECORE_INIT_OPS__
+
+#include "ecore.h"
+
+/**
+ * @brief ecore_init_iro_array - init iro_arr.
+ *
+ *
+ * @param p_dev
+ */
+void ecore_init_iro_array(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int phase, int phase_id, int modes);
+
+/**
+ * @brief ecore_init_alloc - Allocate the RT array and store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_free - Free the RT init arrays.
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val) \
+ ecore_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+ ecore_init_store_rt_reg(hwfn, offset, val)
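+
+/* Usage sketch (illustrative; the RT offset name is taken from
+ * ecore_rt_defs.h): init code records values into the RT array, and the
+ * init sequence later flushes them to HW via INIT_SRC_RUNTIME write ops:
+ *
+ *	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ */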
+
+/**
+ * @brief ecore_init_store_rt_agg - Store an aggregate of configuration
+ * values in the RT array.
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset, u32 *val, osal_size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+ ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+/**
+ * @brief ecore_gtt_init - Initialize GTT global windows and set admin
+ * window related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+#endif /* __ECORE_INIT_OPS__ */
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
new file mode 100644
index 00000000..ea0fd7aa
--- /dev/null
+++ b/drivers/net/qede/base/ecore_int.c
@@ -0,0 +1,2225 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_init_ops.h"
+#include "ecore_rt_defs.h"
+#include "ecore_int.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_hw_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_mcp.h"
+#include "ecore_attn_values.h"
+
+struct ecore_pi_info {
+ ecore_int_comp_cb_t comp_cb;
+ void *cookie; /* Will be sent to the compl cb function */
+};
+
+struct ecore_sb_sp_info {
+ struct ecore_sb_info sb_info;
+ /* per protocol index data */
+ struct ecore_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+enum ecore_attention_type {
+ ECORE_ATTN_TYPE_ATTN,
+ ECORE_ATTN_TYPE_PARITY,
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+struct aeu_invert_reg_bit {
+ char bit_name[30];
+
+#define ATTENTION_PARITY (1 << 0)
+
+#define ATTENTION_LENGTH_MASK (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT (4)
+#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
+ ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
+ ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT (12)
+
+#define ATTENTION_CLEAR_ENABLE (1 << 28)
+#define ATTENTION_FW_DUMP (1 << 29)
+#define ATTENTION_PANIC_DUMP (1 << 30)
+ unsigned int flags;
+
+ /* Callback to call if attention will be triggered */
+ enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
+
+ enum block_id block_index;
+};
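+/* Example (illustrative): an entry such as
+ *	{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
+ *	 (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID}
+ * describes 8 consecutive AEU bits sharing one description; the encoded
+ * offset (1) is the first number substituted for the %d in the name.
+ */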
+
+struct aeu_invert_reg {
+ struct aeu_invert_reg_bit bits[32];
+};
+
+#define MAX_ATTN_GRPS (8)
+#define NUM_ATTN_REGS (9)
+
+static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+ DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
+#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
+#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
+#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
+#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT (0)
+#define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
+static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp =
+ ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_VALID);
+
+ /* Disabled VF access */
+ if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
+ u32 addr, data;
+
+ addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
+ data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_VF_DISABLED_ERROR_DATA);
+ DP_INFO(p_hwfn->p_dev,
+ "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
+ " Write [0x%02x] Addr [0x%08x]\n",
+ (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
+ >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
+ (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
+ >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
+ ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
+ ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
+ ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
+ addr);
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_VALID);
+ if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+ u32 addr, data, length;
+
+ addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+ data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_DATA);
+ length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+ DP_INFO(p_hwfn->p_dev,
+ "Incorrect access to %08x of length %08x - PF [%02x]"
+ " VF [%04x] [valid %02x] client [%02x] write [%02x]"
+ " Byte-Enable [%04x] [%08x]\n",
+ addr, length,
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
+ (u8)((data &
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
+ ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
+ data);
+ }
+
+ /* TODO - We know 'some' of these are legal due to virtualization,
+ * but is it true for all of them?
+ */
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
+#define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
+#define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
+#define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
+#define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
+#define ECORE_GRC_ATTENTION_PF_MASK (0xf)
+#define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
+#define ECORE_GRC_ATTENTION_VF_SHIFT (4)
+#define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
+#define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
+#define ECORE_GRC_ATTENTION_PRIV_VF (0)
+static const char *grc_timeout_attn_master_to_str(u8 master)
+{
+ switch (master) {
+ case 1:
+ return "PXP";
+ case 2:
+ return "MCP";
+ case 3:
+ return "MSDM";
+ case 4:
+ return "PSDM";
+ case 5:
+ return "YSDM";
+ case 6:
+ return "USDM";
+ case 7:
+ return "TSDM";
+ case 8:
+ return "XSDM";
+ case 9:
+ return "DBU";
+ case 10:
+ return "DMAE";
+ default:
+ return "Unknown";
+ }
+}
+
+static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp, tmp2;
+
+ /* We've already cleared the timeout interrupt register, so we learn
+ * of interrupts via the validity register
+ */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+ if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
+ goto out;
+
+ /* Read the GRC timeout information */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+ tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+ DP_INFO(p_hwfn->p_dev,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
+ " [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
+ (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
+ grc_timeout_attn_master_to_str((tmp &
+ ECORE_GRC_ATTENTION_MASTER_MASK) >>
+ ECORE_GRC_ATTENTION_MASTER_SHIFT),
+ (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
+ (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
+ ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
+ ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
+ (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
+ ECORE_GRC_ATTENTION_VF_SHIFT);
+
+out:
+ /* Regardless of anything else, clean the validity bit */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
+#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
+#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
+#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
+#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
+#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
+#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
+static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp, reg_addr;
+
+ reg_addr =
+ attn_blocks[BLOCK_PGLUE_B].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+ int_regs[0]->mask_addr;
+
+ /* Mask unnecessary attentions -@TBD move to MFW */
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
+ tmp |= (1 << 19); /* Was PGL_PCIE_ATTN */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal write by chip to [%08x:%08x] blocked."
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
+ " Details2 %08x [Was_error %02x BME deassert %02x"
+ " FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
+ ? 1 : 0), tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+ : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
+ 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+ : 0));
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal read by chip from [%08x:%08x] blocked."
+ " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
+ " Details2 %08x [Was_error %02x BME deassert %02x"
+ " FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
+ ? 1 : 0), tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+ : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
+ 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+ : 0));
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
+ DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
+ u32 addr_hi, addr_lo;
+
+ addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+ DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
+ }
+
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
+ u32 addr_hi, addr_lo, details;
+
+ addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+ addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+ details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "ILT error - Details %08x Details2 %08x"
+ " [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
+ }
+
+ /* Clear the indications */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_nig_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 tmp, reg_addr;
+
+ /* Mask unnecessary attentions -@TBD move to MFW */
+ reg_addr =
+ attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+ int_regs[3]->mask_addr;
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
+ tmp |= (1 << 0); /* Was 3_P0_TX_PAUSE_TOO_LONG_INT */
+ tmp |= NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT;
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
+
+ reg_addr =
+ attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+ int_regs[5]->mask_addr;
+ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
+ tmp |= (1 << 0); /* Was 5_P1_TX_PAUSE_TOO_LONG_INT */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
+
+ /* TODO - a bit risky to return success here; but the alternative is to
+ * actually read the multitude of interrupt registers of the block.
+ */
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
+{
+ DP_NOTICE(p_hwfn, false, "FW assertion!\n");
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
+
+ return ECORE_INVAL;
+}
+
+static enum _ecore_status_t
+ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
+{
+ DP_INFO(p_hwfn, "General attention 35!\n");
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
+
+static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ u32 reason;
+
+ reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
+ ECORE_DORQ_ATTENTION_REASON_MASK;
+ if (reason) {
+ u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS);
+
+ DP_INFO(p_hwfn->p_dev,
+ "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
+ " Size [bytes] 0x%08x Reason: 0x%08x\n",
+ ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS),
+ (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
+ ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
+ ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
+ }
+
+ return ECORE_INVAL;
+}
+
+static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
+ u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ TM_REG_INT_STS_1);
+
+ if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
+ TM_REG_INT_STS_1_PEND_CONN_SCAN))
+ return ECORE_INVAL;
+
+ if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
+ TM_REG_INT_STS_1_PEND_CONN_SCAN))
+ DP_INFO(p_hwfn,
+ "TM attention on emulation - most likely"
+ " results of clock-ratios\n");
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
+ val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
+ TM_REG_INT_MASK_1_PEND_TASK_SCAN;
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
+
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ return ECORE_INVAL;
+}
+
+/* Note: aeu_invert_reg must be defined in the same bit order as in HW. */
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+ {
+ { /* After Invert 1 */
+ {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 2 */
+ {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
+ BLOCK_PGLUE_B},
+ {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"SW timers #%d",
+ (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
+ OSAL_NULL, MAX_BLOCK_ID},
+ {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ BLOCK_PGLCS},
+ }
+ },
+
+ {
+ { /* After Invert 3 */
+ {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 4 */
+ {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+ ecore_fw_assertion, MAX_BLOCK_ID},
+ {"General Attention %d",
+ (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
+ OSAL_NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+ ecore_general_attention_35, MAX_BLOCK_ID},
+ {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ BLOCK_CNIG},
+ {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
+ {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ {"NIG", ATTENTION_PAR_INT, ecore_nig_attn_cb, BLOCK_NIG},
+ {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
+ {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
+ {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
+ {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
+ }
+ },
+
+ {
+ { /* After Invert 5 */
+ {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
+ {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
+ {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
+ {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
+ {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
+ {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
+ {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
+ {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
+ {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
+ {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
+ {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
+ {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
+ {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
+ {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
+ {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
+ {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
+ }
+ },
+
+ {
+ { /* After Invert 6 */
+ {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
+ {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
+ {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
+ {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
+ {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
+ {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
+ {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
+ {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
+ {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
+ {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
+ {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
+ {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
+ {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
+ {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
+ {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
+ {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
+ }
+ },
+
+ {
+ { /* After Invert 7 */
+ {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
+ {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
+ {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
+ {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
+ {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+ {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
+ {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
+ {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
+ {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
+ {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
+ {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
+ {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
+ {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
+ {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
+ {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
+ {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
+ }
+ },
+
+ {
+ { /* After Invert 8 */
+ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
+ {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
+ {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
+ {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
+ {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
+ {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
+ {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
+ {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
+ {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
+ {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
+ {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
+ {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+ {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 9 */
+ {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
+ MAX_BLOCK_ID},
+ {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+ MAX_BLOCK_ID},
+ }
+ },
+
+};
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE (0x3ff)
+struct ecore_sb_attn_info {
+ /* Virtual & Physical address of the SB */
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
+
+ /* Last seen running index */
+ u16 index;
+
+ /* A mask of the AEU bits resulting in a parity error */
+ u32 parity_mask[NUM_ATTN_REGS];
+
+ /* A pointer to the attention description structure */
+ struct aeu_invert_reg *p_aeu_desc;
+
+ /* Previously asserted attentions, which are still unasserted */
+ u16 known_attn;
+
+ /* Cleanup address for the link's general hw attention */
+ u32 mfw_attn_addr;
+};
+
+static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_attn_info *p_sb_desc)
+{
+ u16 rc = 0, index;
+
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
+ if (p_sb_desc->index != index) {
+ p_sb_desc->index = index;
+ rc = ECORE_SB_ATT_IDX;
+ }
+
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ return rc;
+}
+
+/**
+ * @brief ecore_int_assertion - handles asserted attention bits
+ *
+ * @param p_hwfn
+ * @param asserted_bits newly asserted bits
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
+ u16 asserted_bits)
+{
+ struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 igu_mask;
+
+ /* Mask the source of the attention in the IGU */
+ igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+ igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+ igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "inner known ATTN state: 0x%04x --> 0x%04x\n",
+ sb_attn_sw->known_attn,
+ sb_attn_sw->known_attn | asserted_bits);
+ sb_attn_sw->known_attn |= asserted_bits;
+
+ /* Handle MCP events */
+ if (asserted_bits & 0x100) {
+ ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+ /* Clean the MCP attention */
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ sb_attn_sw->mfw_attn_addr, 0);
+ }
+
+ /* FIXME - this will change once we have proper GTT definitions */
+ DIRECT_REG_WR(p_hwfn,
+ (u8 OSAL_IOMEM *) p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_SET_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
+ asserted_bits);
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_deassertion_print_bit(struct ecore_hwfn *p_hwfn,
+ struct attn_hw_reg *p_reg_desc,
+ struct attn_hw_block *p_block,
+ enum ecore_attention_type type,
+ u32 val, u32 mask)
+{
+ int j;
+#ifdef ATTN_DESC
+ const char **description;
+
+ if (type == ECORE_ATTN_TYPE_ATTN)
+ description = p_block->int_desc;
+ else
+ description = p_block->prty_desc;
+#endif
+
+ for (j = 0; j < p_reg_desc->num_of_bits; j++) {
+ if (val & (1 << j)) {
+#ifdef ATTN_DESC
+ DP_NOTICE(p_hwfn, false,
+ "%s (%s): %s [reg %d [0x%08x], bit %d]%s\n",
+ p_block->name,
+ type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
+ "Parity",
+ description[p_reg_desc->bit_attn_idx[j]],
+ p_reg_desc->reg_idx,
+ p_reg_desc->sts_addr, j,
+ (mask & (1 << j)) ? " [MASKED]" : "");
+#else
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "%s (%s): [reg %d [0x%08x], bit %d]%s\n",
+ p_block->name,
+ type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
+ "Parity",
+ p_reg_desc->reg_idx,
+ p_reg_desc->sts_addr, j,
+ (mask & (1 << j)) ? " [MASKED]" : "");
+#endif
+ }
+ }
+}
+
+/**
+ * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
+ * cause of the attention
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the attention
+ * @param aeu_en_reg - register offset of the AEU enable reg. which configured
+ * this bit to this group.
+ * @param bitmask - mask of the bits in aeu_en_reg that caused the attention
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg, u32 bitmask)
+{
+ enum _ecore_status_t rc = ECORE_INVAL;
+ u32 val, mask;
+
+#ifndef REMOVE_DBG
+ u32 interrupts[20]; /* TODO - change into an HSI define once supplied */
+
+ OSAL_MEMSET(interrupts, 0, sizeof(u32) * 20); /* FIXME - real size */
+#endif
+
+ DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+ p_aeu->bit_name, bitmask);
+
+ /* Call callback before clearing the interrupt status */
+ if (p_aeu->cb) {
+ DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+ p_aeu->bit_name);
+ rc = p_aeu->cb(p_hwfn);
+ }
+
+ /* Handle HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID) {
+ u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
+ struct attn_hw_block *p_block;
+ int i;
+
+ p_block = &attn_blocks[p_aeu->block_index];
+
+ /* Handle each interrupt register */
+ for (i = 0;
+ i < p_block->chip_regs[chip_type].num_of_int_regs; i++) {
+ struct attn_hw_reg *p_reg_desc;
+ u32 sts_addr;
+
+ p_reg_desc = p_block->chip_regs[chip_type].int_regs[i];
+
+ /* In case of fatal attention, don't clear the status
+ * so it would appear in idle check.
+ */
+ if (rc == ECORE_SUCCESS)
+ sts_addr = p_reg_desc->sts_clr_addr;
+ else
+ sts_addr = p_reg_desc->sts_addr;
+
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
+ mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->mask_addr);
+ ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+ p_block,
+ ECORE_ATTN_TYPE_ATTN,
+ val, mask);
+
+#ifndef REMOVE_DBG
+ interrupts[i] = val;
+#endif
+ }
+ }
+
+ /* Reach assertion if attention is fatal */
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
+ p_aeu->bit_name);
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
+ }
+
+ /* Prevent this Attention from being asserted in the future */
+ if (p_aeu->flags & ATTENTION_CLEAR_ENABLE) {
+ u32 val;
+ u32 mask = ~bitmask;
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
+ DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ p_aeu->bit_name);
+ }
+
+ if (p_aeu->flags & (ATTENTION_FW_DUMP | ATTENTION_PANIC_DUMP)) {
+ /* @@@TODO - what to dump? <yuvalmin 04/02/13> */
+ DP_ERR(p_hwfn->p_dev, "`%s' - Dumps aren't implemented yet\n",
+ p_aeu->bit_name);
+ return ECORE_NOTIMPL;
+ }
+
+ return rc;
+}
+
+static void ecore_int_parity_print(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ struct attn_hw_block *p_block, u8 bit_index)
+{
+ u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
+ int i;
+
+ for (i = 0; i < p_block->chip_regs[chip_type].num_of_prty_regs; i++) {
+ struct attn_hw_reg *p_reg_desc;
+ u32 val, mask;
+
+ p_reg_desc = p_block->chip_regs[chip_type].prty_regs[i];
+
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->sts_clr_addr);
+ mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->mask_addr);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "%s[%d] - parity register[%d] is %08x [mask is %08x]\n",
+ p_aeu->bit_name, bit_index, i, val, mask);
+ ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+ p_block,
+ ECORE_ATTN_TYPE_PARITY,
+ val, mask);
+ }
+}
+
+/**
+ * @brief ecore_int_deassertion_parity - handle a single parity AEU source
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the
+ * parity
+ * @param bit_index
+ */
+static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u8 bit_index)
+{
+ u32 block_id = p_aeu->block_index;
+
+ DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
+ p_aeu->bit_name, bit_index);
+
+ if (block_id != MAX_BLOCK_ID) {
+ ecore_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
+ bit_index);
+
+ /* In A0, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ ecore_int_parity_print(p_hwfn, p_aeu,
+ &attn_blocks[BLOCK_OPTE],
+ bit_index);
+ ecore_int_parity_print(p_hwfn, p_aeu,
+ &attn_blocks[BLOCK_MCP],
+ bit_index);
+ }
+ }
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return enum _ecore_status_t
+ *
+ */
+static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
+ u16 deasserted_bits)
+{
+ struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+ bool b_parity = false;
+ u8 i, j, k, bit_idx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* Read the attention registers in the AEU */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_AFTER_INVERT_1_IGU +
+ i * 0x4);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
+ }
+
+ /* Handle parity attentions first */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+ u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32));
+
+ u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+
+ /* Skip register in which no parity bit is currently set */
+ if (!parities)
+ continue;
+
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+ if ((p_bit->flags & ATTENTION_PARITY) &&
+ !!(parities & (1 << bit_idx))) {
+ ecore_int_deassertion_parity(p_hwfn, p_bit,
+ bit_idx);
+ b_parity = true;
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_bit->flags);
+ }
+ }
+
+ /* Find non-parity cause for attention and act */
+ for (k = 0; k < MAX_ATTN_GRPS; k++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ /* Handle only groups whose attention is currently deasserted */
+ if (!(deasserted_bits & (1 << k)))
+ continue;
+
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
+ u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ u32 bits = aeu_inv_arr[i] & en;
+
+ /* Skip if no bit from this group is currently set */
+ if (!bits)
+ continue;
+
+ /* Find all set bits from current register which belong
+ * to current group, making them responsible for the
+ * previous assertion.
+ */
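+			/* Illustrative example (hypothetical layout): for a
+			 * 2-bit ATTENTION_PAR_INT field starting at bit_idx
+			 * 4, the parity bit (4) is skipped and only the
+			 * interrupt bit (5) is tested, i.e. the loop below
+			 * computes bitmask = bits & (1 << 5).
+			 */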
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ u8 bit, bit_len;
+ u32 bitmask;
+
+ p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+
+				/* No need to handle parity-only bits */
+ if (p_aeu->flags == ATTENTION_PAR)
+ continue;
+
+ bit = bit_idx;
+ bit_len = ATTENTION_LENGTH(p_aeu->flags);
+ if (p_aeu->flags & ATTENTION_PAR_INT) {
+ /* Skip Parity */
+ bit++;
+ bit_len--;
+ }
+
+ bitmask = bits & (((1 << bit_len) - 1) << bit);
+ if (bitmask) {
+ /* Handle source of the attention */
+ ecore_int_deassertion_aeu_bit(p_hwfn,
+ p_aeu,
+ aeu_en,
+ bitmask);
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ }
+ }
+
+ /* Clear IGU indication for the deasserted bits */
+ /* FIXME - this will change once we'll have GOOD gtt definitions */
+ DIRECT_REG_WR(p_hwfn,
+ (u8 OSAL_IOMEM *) p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
+
+ /* Unmask deasserted attentions in IGU */
+ aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ IGU_REG_ATTENTION_ENABLE);
+ aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+ /* Clear deassertion from inner state */
+ sb_attn_sw->known_attn &= ~deasserted_bits;
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+ struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+ u16 index = 0, asserted_bits, deasserted_bits;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 attn_bits = 0, attn_acks = 0;
+
+	/* Read current attention bits/acks - safeguard against racing
+	 * attention updates by guaranteeing work on a synchronized timeframe
+	 */
+ do {
+ index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
+ attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
+ attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
+ } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
+ p_sb_attn->sb_index = index;
+
+	/* Attention / Deassertion are meaningful (and in correct state)
+	 * only when they differ and are consistent with the known state -
+	 * deassertion when there was a previous attention & a current ack,
+	 * and assertion when there is a current attention with no previous
+	 * attention
+	 */
+ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+ ~p_sb_attn_sw->known_attn;
+ deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+ p_sb_attn_sw->known_attn;
+
+ if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
+ DP_INFO(p_hwfn,
+ "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+ index, attn_bits, attn_acks, asserted_bits,
+ deasserted_bits, p_sb_attn_sw->known_attn);
+ else if (asserted_bits == 0x100)
+ DP_INFO(p_hwfn, "MFW indication via attention\n");
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "MFW indication [deassertion]\n");
+
+ if (asserted_bits) {
+ rc = ecore_int_assertion(p_hwfn, asserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ if (deasserted_bits)
+ rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
+
+ return rc;
+}
+
+static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM *igu_addr, u32 ack_cons)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_ATTN <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
+
+	/* Both segments (interrupts & acks) are written to the same address;
+	 * need to guarantee all commands will be received (in-order) by HW.
+	 */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+ OSAL_BARRIER(p_hwfn->p_dev);
+}
+
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
+{
+ struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
+ struct ecore_pi_info *pi_info = OSAL_NULL;
+ struct ecore_sb_attn_info *sb_attn;
+ struct ecore_sb_info *sb_info;
+ static int arr_size;
+ u16 rc = 0;
+
+	if (!p_hwfn) {
+		/* Can't use DP_ERR here - there is no valid hwfn from which
+		 * to take p_dev, and dereferencing it would crash.
+		 */
+		return;
+	}
+
+ if (!p_hwfn->p_sp_sb) {
+ DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
+ return;
+ }
+
+ sb_info = &p_hwfn->p_sp_sb->sb_info;
+ arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+ if (!sb_info) {
+ DP_ERR(p_hwfn->p_dev,
+ "Status block is NULL - cannot ack interrupts\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sb_attn) {
+ DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
+ return;
+ }
+ sb_attn = p_hwfn->p_sb_attn;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+ p_hwfn, p_hwfn->my_id);
+
+	/* Disable ack for def status block. Required both for msix and for
+	 * inta in non-mask mode; in inta mode it does no harm.
+	 */
+ ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+ /* Gather Interrupts/Attentions information */
+ if (!sb_info->sb_virt) {
+ DP_ERR(p_hwfn->p_dev,
+ "Interrupt Status block is NULL -"
+ " cannot check for new interrupts!\n");
+ } else {
+ u32 tmp_index = sb_info->sb_ack;
+ rc = ecore_sb_update_sb_idx(sb_info);
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+ "Interrupt indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_info->sb_ack);
+ }
+
+ if (!sb_attn || !sb_attn->sb_attn) {
+ DP_ERR(p_hwfn->p_dev,
+ "Attentions Status block is NULL -"
+ " cannot check for new attentions!\n");
+ } else {
+ u16 tmp_index = sb_attn->index;
+
+ rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+ "Attention indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_attn->index);
+ }
+
+	/* Check if we expect interrupts at this time. If not, just ack them */
+ if (!(rc & ECORE_SB_EVENT_MASK)) {
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+	/* Check the validity of the DPC ptt. If invalid, ack interrupts
+	 * and fail.
+	 */
+ if (!p_hwfn->p_dpc_ptt) {
+ DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ if (rc & ECORE_SB_ATT_IDX)
+ ecore_int_attentions(p_hwfn);
+
+ if (rc & ECORE_SB_IDX) {
+ int pi;
+
+ /* Since we only looked at the SB index, it's possible more
+ * than a single protocol-index on the SB incremented.
+ * Iterate over all configured protocol indices and check
+ * whether something happened for each.
+ */
+ for (pi = 0; pi < arr_size; pi++) {
+ pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+ if (pi_info->comp_cb != OSAL_NULL)
+ pi_info->comp_cb(p_hwfn, pi_info->cookie);
+ }
+ }
+
+ if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
+ /* This should be done before the interrupts are enabled,
+ * since otherwise a new attention will be generated.
+ */
+ ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+ }
+
+ ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_attn) {
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
+ p_sb->sb_phys,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn));
+ }
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+}
+
+static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+ sb_info->index = 0;
+ sb_info->known_attn = 0;
+
+ /* Configure Attention Status Block in IGU */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+ DMA_LO(p_hwfn->p_sb_attn->sb_phys));
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+ DMA_HI(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *sb_virt_addr, dma_addr_t sb_phy_addr)
+{
+ struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+ int i, j, k;
+
+ sb_info->sb_attn = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ /* Set the pointer to the AEU descriptors */
+ sb_info->p_aeu_desc = aeu_descs;
+
+ /* Calculate Parity Masks */
+ OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ /* j is array index, k is bit index */
+ for (j = 0, k = 0; k < 32; j++) {
+ unsigned int flags = aeu_descs[i].bits[j].flags;
+
+ if (flags & ATTENTION_PARITY)
+ sb_info->parity_mask[i] |= 1 << k;
+
+ k += ATTENTION_LENGTH(flags);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Attn Mask [Reg %d]: 0x%08x\n",
+ i, sb_info->parity_mask[i]);
+ }
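+
+	/* Worked example (hypothetical descriptor layout): if bits[0] is a
+	 * 2-bit field flagged ATTENTION_PARITY and bits[1] is a plain 1-bit
+	 * source, the walk above sets parity_mask[i] |= 1 << 0 for the
+	 * first field, advances k by 2 and then by 1, yielding a final
+	 * mask of 0x00000001 for that register.
+	 */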
+
+ /* Set the address of cleanup for the mcp attention */
+ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+ MISC_REG_AEU_GENERAL_ATTN_0;
+
+ ecore_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_sb_attn_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(struct ecore_sb_attn_info));
+ if (!p_sb) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate `struct ecore_sb_attn_info'");
+ return ECORE_NOMEM;
+ }
+
+ /* SB ring */
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn));
+ if (!p_virt) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate status block (attentions)");
+ OSAL_FREE(p_dev, p_sb);
+ return ECORE_NOMEM;
+ }
+
+ /* Attention setup */
+ p_hwfn->p_sb_attn = p_sb;
+ ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+ return ECORE_SUCCESS;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#ifdef RTE_LIBRTE_QEDE_RX_COAL_US
+#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
+#else
+#define ECORE_CAU_DEF_RX_USECS 24
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_TX_COAL_US
+#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
+#else
+#define ECORE_CAU_DEF_TX_USECS 48
+#endif
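+
+/* Worked example using the defaults above: with a timer resolution of 0
+ * (ECORE_CAU_DEF_RX_TIMER_RES) and rx-usecs == 24, the driver derives
+ * timeset = 24 >> (0 + 1) = 12, and the resulting HW timeout is
+ * 12 << (0 + 1) = 24 usecs - the shifts cancel out as long as the
+ * requested value is a multiple of the resolution step.
+ */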
+
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id, u16 vf_number, u8 vf_valid)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 cau_state;
+
+ OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+	/* setting the timer resolution to a fixed value */
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+ ECORE_CAU_DEF_RX_TIMER_RES);
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+ ECORE_CAU_DEF_TX_TIMER_RES);
+
+ cau_state = CAU_HC_DISABLE_STATE;
+
+ if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+ cau_state = CAU_HC_ENABLE_STATE;
+ if (!p_dev->rx_coalesce_usecs) {
+ p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
+ DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
+ p_dev->rx_coalesce_usecs);
+ }
+ if (!p_dev->tx_coalesce_usecs) {
+ p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
+ DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
+ p_dev->tx_coalesce_usecs);
+ }
+ }
+
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+ SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t sb_phys, u16 igu_sb_id,
+ u16 vf_number, u8 vf_valid)
+{
+ struct cau_sb_entry sb_entry;
+
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+ vf_number, vf_valid);
+
+ if (p_hwfn->hw_init_done) {
+ /* Wide-bus, initialize via DMAE */
+ u64 phys_addr = (u64)sb_phys;
+
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&phys_addr,
+ CAU_REG_SB_ADDR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
+ } else {
+ /* Initialize Status Block Address */
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2, sb_phys);
+
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2, sb_entry);
+ }
+
+ /* Configure pi coalescing if set */
+ if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+ u8 num_tc = 1; /* @@@TBD aelior ECORE_MULTI_COS */
+ u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
+ (ECORE_CAU_DEF_RX_TIMER_RES + 1);
+ u8 i;
+
+ ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ ECORE_COAL_RX_STATE_MACHINE, timeset);
+
+ timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
+ (ECORE_CAU_DEF_TX_TIMER_RES + 1);
+
+ for (i = 0; i < num_tc; i++) {
+ ecore_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ ECORE_COAL_TX_STATE_MACHINE,
+ timeset);
+ }
+ }
+}
+
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id, u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset, pi_offset;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return; /* @@@TBD MichalK- VF CAU... */
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ ecore_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
+{
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ if (IS_PF(p_hwfn->p_dev))
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief ecore_get_igu_sb_id - given a sw sb_id return the
+ * igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+ u16 igu_sb_id;
+
+	/* Assuming a contiguous set of IGU SBs dedicated for given PF */
+ if (sb_id == ECORE_SP_SB_ID)
+ igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ else if (IS_PF(p_hwfn->p_dev))
+ igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ else
+ igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id == ECORE_SP_SB_ID)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
+
+ return igu_sb_id;
+}
+
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id)
+{
+ sb_info->sb_virt = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id != ECORE_SP_SB_ID) {
+ p_hwfn->sbs_info[sb_id] = sb_info;
+ p_hwfn->num_sbs++;
+ }
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ sb_info->p_hwfn = p_hwfn;
+#endif
+ sb_info->p_dev = p_hwfn->p_dev;
+
+ /* The igu address will hold the absolute address that needs to be
+ * written to for a specific status block
+ */
+ if (IS_PF(p_hwfn->p_dev)) {
+ sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
+
+ } else {
+ sb_info->igu_addr =
+ (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_IGU +
+ ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
+ }
+
+ sb_info->flags |= ECORE_SB_INFO_INIT;
+
+ ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_info *sb_info,
+ u16 sb_id)
+{
+ if (sb_id == ECORE_SP_SB_ID) {
+ DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+ return ECORE_INVAL;
+ }
+
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
+ p_hwfn->sbs_info[sb_id] = OSAL_NULL;
+ p_hwfn->num_sbs--;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_info.sb_virt) {
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys,
+ SB_ALIGNED_SIZE(p_hwfn));
+ }
+
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+}
+
+static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_sp_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb =
+ OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_sb_sp_info));
+ if (!p_sb) {
+ DP_NOTICE(p_hwfn, true,
+			  "Failed to allocate `struct ecore_sb_sp_info'");
+ return ECORE_NOMEM;
+ }
+
+ /* SB ring */
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_phys, SB_ALIGNED_SIZE(p_hwfn));
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
+ OSAL_FREE(p_hwfn->p_dev, p_sb);
+ return ECORE_NOMEM;
+ }
+
+ /* Status Block setup */
+ p_hwfn->p_sp_sb = p_sb;
+ ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
+ p_virt, p_phys, ECORE_SP_SB_ID);
+
+ OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+ ecore_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx, __le16 **p_fw_cons)
+{
+ struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ enum _ecore_status_t rc = ECORE_NOMEM;
+ u8 pi;
+
+ /* Look for a free index */
+ for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
+ continue;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ rc = ECORE_SUCCESS;
+ break;
+ }
+
+ return rc;
+}
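+
+/* Illustrative usage (hypothetical callback and cookie): a protocol
+ * registers for slowpath SB updates roughly as follows:
+ *
+ *	u8 sb_idx;
+ *	__le16 *p_fw_cons;
+ *
+ *	rc = ecore_int_register_cb(p_hwfn, my_proto_cb, my_cookie,
+ *				   &sb_idx, &p_fw_cons);
+ *
+ * On success, my_proto_cb() is invoked from ecore_int_sp_dpc() whenever
+ * the slowpath SB index advances, and p_fw_cons points at the FW
+ * consumer for this protocol index.
+ */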
+
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
+{
+ struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+
+ if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
+ return ECORE_SUCCESS;
+}
+
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
+ else
+#endif
+ igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
+
+ p_hwfn->p_dev->int_mode = int_mode;
+ switch (p_hwfn->p_dev->int_mode) {
+ case ECORE_INT_MODE_INTA:
+ igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case ECORE_INT_MODE_MSI:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case ECORE_INT_MODE_MSIX:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ break;
+ case ECORE_INT_MODE_POLL:
+ break;
+ }
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn,
+ "FPGA - Don't enable Attentions in IGU and MISC\n");
+ return;
+ }
+#endif
+
+ /* Configure AEU signal change to produce attentions */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
+
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ /* Unmask AEU signals toward IGU */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+enum _ecore_status_t
+ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 tmp, reg_addr;
+
+ /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
+ tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
+ tmp |= 0xf;
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
+
+ /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
+ * attentions. Since we're waiting for BRCM answer regarding this
+ * attention, in the meanwhile we simply mask it.
+ */
+ tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
+ tmp &= ~0x800;
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
+
+	/* @@@tmp - Mask interrupt sources - should move to init tool;
+	 * Also, correct for A0 [might still change in B0].
+	 */
+ reg_addr =
+ attn_blocks[BLOCK_BRB].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+ int_regs[0]->mask_addr;
+ tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
+ tmp |= (1 << 21); /* Was PKT4_LEN_ERROR */
+ ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);
+
+ ecore_int_igu_enable_attn(p_hwfn, p_ptt);
+
+ if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+ rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Slowpath IRQ request failed\n");
+ return ECORE_NORESOURCES;
+ }
+ p_hwfn->b_int_requested = true;
+ }
+
+ /* Enable interrupt Generation */
+ ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+ p_hwfn->b_int_enabled = 1;
+
+ return rc;
+}
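+
+/* Illustrative bring-up order (a sketch, not a mandated contract): a
+ * caller would typically allocate and set up the interrupt-related
+ * status blocks before enabling interrupt generation, e.g.:
+ *
+ *	rc = ecore_int_alloc(p_hwfn, p_ptt);
+ *	if (rc == ECORE_SUCCESS) {
+ *		ecore_int_setup(p_hwfn, p_ptt);
+ *		rc = ecore_int_igu_enable(p_hwfn, p_ptt,
+ *					  ECORE_INT_MODE_MSIX);
+ *	}
+ */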
+
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ p_hwfn->b_int_enabled = 0;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return;
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH (1000)
+void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 sb_id, bool cleanup_set, u16 opaque_fid)
+{
+ u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+ u8 type = 0; /* FIXME MichalS type??? */
+
+ OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
+ IGU_REG_CLEANUP_STATUS_0) != 0x200);
+
+	/* Use the Control Command Register to perform the cleanup. There is
+	 * an option to do this using the IGU BAR, but then it can't be used
+	 * for VFs.
+	 */
+
+ /* Set the data field */
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
+ SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+ /* Set the control register */
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+ OSAL_BARRIER(p_hwfn->p_dev);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ /* calculate where to read the status bit from */
+ sb_bit = 1 << (sb_id % 32);
+ sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+ sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
+
+ /* Now wait for the command to complete */
+ while (--sleep_cnt) {
+ val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
+ if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+ break;
+ OSAL_MSLEEP(5);
+ }
+
+ if (!sleep_cnt)
+ DP_NOTICE(p_hwfn, true,
+ "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+ val, sb_id);
+}
+
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 sb_id, u16 opaque, bool b_set)
+{
+ int pi;
+
+ /* Set */
+ if (b_set)
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+ /* Clear */
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+ /* Clear the CAU for the SB */
+ for (pi = 0; pi < 12; pi++)
+ ecore_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_set, bool b_slowpath)
+{
+ u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+ u32 sb_id = 0, val = 0;
+
+ /* @@@TBD MichalK temporary... should be moved to init-tool... */
+ val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+ val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+ val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+ /* end temporary */
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU cleaning SBs [%d,...,%d]\n",
+ igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+ for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+
+ if (!b_slowpath)
+ return;
+
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid, b_set);
+}
+
+static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 sb_id)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ struct ecore_igu_block *p_block;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+
+	/* Stop scanning upon hitting the first invalid PF entry */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ goto out;
+
+ /* Fill the block information */
+ p_block->status = ECORE_IGU_STATUS_VALID;
+ p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
+ " is_pf = %d vector_num = 0x%x\n",
+ sb_id, val, p_block->function_id, p_block->is_pf,
+ p_block->vector_number);
+
+out:
+ return val;
+}
+
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_igu_info *p_igu_info;
+ struct ecore_igu_block *p_block;
+ u16 sb_id, last_iov_sb_id = 0;
+ u32 min_vf, max_vf, val;
+ u16 prev_sb_id = 0xFF;
+
+ p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
+ GFP_KERNEL,
+ sizeof(*p_igu_info));
+ if (!p_hwfn->hw_info.p_igu_info)
+ return ECORE_NOMEM;
+
+ OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Initialize base sb / sb cnt for PFs and VFs */
+ p_igu_info->igu_base_sb = 0xffff;
+ p_igu_info->igu_sb_cnt = 0;
+ p_igu_info->igu_dsb_id = 0xffff;
+ p_igu_info->igu_base_sb_iov = 0xffff;
+
+#ifdef CONFIG_ECORE_SRIOV
+ min_vf = p_hwfn->hw_info.first_vf_in_pf;
+ max_vf = p_hwfn->hw_info.first_vf_in_pf +
+ p_hwfn->p_dev->sriov_info.total_vfs;
+#else
+ min_vf = 0;
+ max_vf = 0;
+#endif
+
+ for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ sb_id++) {
+ p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ break;
+
+ if (p_block->is_pf) {
+ if (p_block->function_id == p_hwfn->rel_pf_id) {
+ p_block->status |= ECORE_IGU_STATUS_PF;
+
+ if (p_block->vector_number == 0) {
+ if (p_igu_info->igu_dsb_id == 0xffff)
+ p_igu_info->igu_dsb_id = sb_id;
+ } else {
+ if (p_igu_info->igu_base_sb == 0xffff) {
+ p_igu_info->igu_base_sb = sb_id;
+ } else if (prev_sb_id != sb_id - 1) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "consecutive igu"
+ " vectors for HWFN"
+ " %x broken",
+ p_hwfn->rel_pf_id);
+ break;
+ }
+ prev_sb_id = sb_id;
+ /* we don't count the default */
+ (p_igu_info->igu_sb_cnt)++;
+ }
+ }
+ } else {
+ if ((p_block->function_id >= min_vf) &&
+ (p_block->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ if (p_igu_info->igu_base_sb_iov == 0xffff) {
+ p_igu_info->igu_base_sb_iov = sb_id;
+ } else if (last_iov_sb_id != sb_id - 1) {
+ if (!val)
+ DP_VERBOSE(p_hwfn->p_dev,
+ ECORE_MSG_INTR,
+							   "First uninitialized IGU"
+ " CAM entry at"
+ " index 0x%04x\n",
+ sb_id);
+ else
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Consecutive igu"
+ " vectors for HWFN"
+ " %x vfs is broken"
+ " [jumps from %04x"
+ " to %04x]\n",
+ p_hwfn->rel_pf_id,
+ last_iov_sb_id,
+ sb_id);
+ break;
+ }
+ p_block->status |= ECORE_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ last_iov_sb_id = sb_id;
+ }
+ }
+ }
+ p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
+ "igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
+ p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
+ p_igu_info->igu_dsb_id);
+
+ if (p_igu_info->igu_base_sb == 0xffff ||
+ p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU CAM returned invalid values igu_base_sb=0x%x "
+ "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_dsb_id);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
+ IGU_CMD_INT_ACK_BASE)
+#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
+ IGU_CMD_INT_ACK_BASE)
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
+{
+ u32 intr_status_hi = 0, intr_status_lo = 0;
+ u64 intr_status = 0;
+
+ intr_status_lo = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ LSB_IGU_CMD_ADDR * 8);
+ intr_status_hi = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ MSB_IGU_CMD_ADDR * 8);
+ intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+ return intr_status;
+}
+
+static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
+ p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
+{
+ p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
+ if (!p_hwfn->sp_dpc)
+ return ECORE_NOMEM;
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
+}
+
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_int_sp_dpc_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
+ return rc;
+ }
+
+ rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
+ return rc;
+ }
+
+ rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
+
+ return rc;
+}
+
+void ecore_int_free(struct ecore_hwfn *p_hwfn)
+{
+ ecore_int_sp_sb_free(p_hwfn);
+ ecore_int_sb_attn_free(p_hwfn);
+ ecore_int_sp_dpc_free(p_hwfn);
+}
+
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
+ return;
+
+ ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ ecore_int_sb_attn_setup(p_hwfn, p_ptt);
+ ecore_int_sp_dpc_setup(p_hwfn);
+}
+
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_cnt_info *p_sb_cnt_info)
+{
+ struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+ if (!info || !p_sb_cnt_info)
+ return;
+
+ p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
+ p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
+ p_sb_cnt_info->sb_free_blk = info->free_blks;
+}
+
+u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Determine origin of SB id */
+ if ((sb_id >= p_info->igu_base_sb) &&
+ (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+ return sb_id - p_info->igu_base_sb;
+ } else if ((sb_id >= p_info->igu_base_sb_iov) &&
+ (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+ return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+ }
+
+ DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
+ sb_id);
+ return 0;
+}
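+
+/* Worked example (hypothetical CAM layout): with igu_base_sb = 4 and
+ * igu_sb_cnt = 8, sb_id 6 maps to queue 2. With igu_base_sb_iov = 32,
+ * sb_id 33 maps to queue 33 - 32 + 8 = 9, i.e. VF-range queues are
+ * numbered after the PF ones.
+ */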
+
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
+{
+ int i;
+
+ for_each_hwfn(p_dev, i)
+ p_dev->hwfns[i].b_int_requested = false;
+}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
new file mode 100644
index 00000000..17c95213
--- /dev/null
+++ b/drivers/net/qede/base/ecore_int.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INT_H__
+#define __ECORE_INT_H__
+
+#include "ecore.h"
+#include "ecore_int_api.h"
+
+#define ECORE_CAU_DEF_RX_TIMER_RES 0
+#define ECORE_CAU_DEF_TX_TIMER_RES 0
+
+#define ECORE_SB_ATT_IDX 0x0001
+#define ECORE_SB_EVENT_MASK 0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct ecore_igu_block {
+ u8 status;
+#define ECORE_IGU_STATUS_FREE 0x01
+#define ECORE_IGU_STATUS_VALID 0x02
+#define ECORE_IGU_STATUS_PF 0x04
+
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+};
+
+struct ecore_igu_map {
+ struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct ecore_igu_info {
+ struct ecore_igu_map igu_map;
+ u16 igu_dsb_id;
+ u16 igu_base_sb;
+ u16 igu_base_sb_iov;
+ u16 igu_sb_cnt;
+ u16 igu_sb_cnt_iov;
+ u16 free_blks;
+};
+
+/* TODO - names of these functions may change... */
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_set, bool b_slowpath);
+
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_int_igu_read_cam - Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from igu cam to know which
+ * status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn,
+ void *cookie);
+/**
+ * @brief ecore_int_register_cb - Register callback func for
+ * slowhwfn statusblock.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ * interrupt on the sp sb
+ *
+ * @param cookie - passed to the callback function
+ * @param sb_idx - OUT parameter which gives the chosen index
+ * for this protocol.
+ * @param p_fw_cons - pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+ ecore_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx, __le16 **p_fw_cons);
+/**
+ * @brief ecore_int_unregister_cb - Unregisters callback
+ * function from sp sb.
+ * Partner of ecore_int_register_cb -> should be called
+ * when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi);
+
+/**
+ * @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param cleanup_set - set(1) / clear(0)
+ * @param opaque_fid - the function for which to perform
+ * cleanup, for example a PF on behalf of
+ * its VFs.
+ */
+void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 sb_id, bool cleanup_set, u16 opaque_fid);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ * block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - igu status block id
+ * @param opaque - opaque fid of the sb owner.
+ * @param b_set - set(1) / clear(0)
+ */
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 sb_id, u16 opaque, bool b_set);
+
+/**
+ * @brief ecore_int_cau_conf_sb - configure cau for a given status
+ * block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id, u16 vf_number, u8 vf_valid);
+
+/**
+ * @brief ecore_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_int_free
+ *
+ * @param p_hwfn
+ */
+
+/**
+ * @brief ecore_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+
+/**
+ * @brief - Returns an Rx queue index appropriate for usage with given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry, u8 pf_id,
+ u16 vf_number, u8 vf_valid);
+
+#ifndef ASIC_ONLY
+#define ECORE_MAPPING_MEMORY_SIZE(dev) \
+ ((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \
+ 136 : NUM_OF_SBS(dev))
+#else
+#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
+#endif
+
+#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
new file mode 100644
index 00000000..f6db807e
--- /dev/null
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INT_API_H__
+#define __ECORE_INT_API_H__
+
+#ifndef __EXTRACT__LINUX__
+#define ECORE_SB_IDX 0x0002
+
+#define RX_PI 0
+#define TX_PI(tc) (RX_PI + 1 + (tc))
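+/* e.g. RX_PI = 0, TX_PI(0) = 1, TX_PI(1) = 2, ... */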
+
+#ifndef ECORE_INT_MODE
+#define ECORE_INT_MODE
+enum ecore_int_mode {
+ ECORE_INT_MODE_INTA,
+ ECORE_INT_MODE_MSIX,
+ ECORE_INT_MODE_MSI,
+ ECORE_INT_MODE_POLL,
+};
+#endif
+
+struct ecore_sb_info {
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ u32 sb_ack; /* Last given ack */
+ u16 igu_sb_id;
+ void OSAL_IOMEM *igu_addr;
+ u8 flags;
+#define ECORE_SB_INFO_INIT 0x1
+#define ECORE_SB_INFO_SETUP 0x2
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ struct ecore_hwfn *p_hwfn;
+#endif
+ struct ecore_dev *p_dev;
+};
+
+struct ecore_sb_cnt_info {
+ int sb_cnt;
+ int sb_iov_cnt;
+ int sb_free_blk;
+};
+
+static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
+{
+ u32 prod = 0;
+ u16 rc = 0;
+
+ /* barrier(); status block is written to by the chip */
+ /* FIXME: need some sort of barrier. */
+ prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
+ STATUS_BLOCK_PROD_INDEX_MASK;
+ if (sb_info->sb_ack != prod) {
+ sb_info->sb_ack = prod;
+ rc |= ECORE_SB_IDX;
+ }
+
+ OSAL_MMIOWB(sb_info->p_dev);
+ return rc;
+}
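+
+/* Illustrative note: callers accumulate the returned bits and test them
+ * against ECORE_SB_IDX / ECORE_SB_ATT_IDX, e.g. in ecore_int_sp_dpc():
+ *
+ *	rc = ecore_sb_update_sb_idx(sb_info);
+ *	rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
+ *	if (!(rc & ECORE_SB_EVENT_MASK))
+ *		... nothing new happened - just re-enable and return ...
+ */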
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ * written to the IGU.
+ *
+ * @param sb_info - This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using ecore_sb_init
+ * @param int_cmd - Enable/Disable/Nop
+ * @param upd_flg - whether igu consumer should be
+ * updated.
+ *
+ * @return OSAL_INLINE void
+ */
+static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
+ enum igu_int_cmd int_cmd, u8 upd_flg)
+{
+ struct igu_prod_cons_update igu_ack = { 0 };
+
+ igu_ack.sb_id_and_flags =
+ ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+ DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
+ igu_ack.sb_id_and_flags);
+#else
+ DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
+#endif
+	/* Both segments (interrupts & acks) are written to the same address;
+	 * need to guarantee all commands will be received (in-order) by HW.
+	 */
+ OSAL_MMIOWB(sb_info->p_dev);
+ OSAL_BARRIER(sb_info->p_dev);
+}
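+
+/* Illustrative usage: an ISR/DPC typically disables further acks while
+ * it polls, then acks the latest consumer and re-enables, e.g.
+ *
+ *	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+ *	... process new completions ...
+ *	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ */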
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM *addr,
+ int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
+ void OSAL_IOMEM *addr,
+ int size, u32 *data)
+#endif
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM *addr,
+ int size, u32 *data)
+{
+ __internal_ram_wr(p_hwfn, addr, size, data);
+}
+#else
+static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
+ int size, u32 *data)
+{
+ __internal_ram_wr(OSAL_NULL, addr, size, data);
+}
+#endif
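+
+/* Illustrative usage (hypothetical buffer): copy a prepared command
+ * image into device internal RAM; with ECORE_CONFIG_DIRECT_HWFN a
+ * p_hwfn argument is prepended to the call below:
+ *
+ *	u32 buf[4] = { 0 };
+ *	internal_ram_wr(addr, sizeof(buf), buf);
+ */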
+#endif
+
+struct ecore_hwfn;
+struct ecore_ptt;
+
+enum ecore_coalescing_fsm {
+ ECORE_COAL_RX_STATE_MACHINE,
+ ECORE_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief ecore_int_cau_conf_pi - configure cau for a given
+ * status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param coalescing_fsm
+ * @param timeset
+ */
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset);
+
+/**
+ *
+ * @brief ecore_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_int_mode int_mode);
+
+/**
+ *
+ * @brief ecore_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ *
+ * @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ * register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
+
+#define ECORE_SP_SB_ID 0xffff
+/**
+ * @brief ecore_int_sb_init - Initializes the sb_info structure.
+ *
+ * Once the structure is initialized, it can be passed to SB-related
+ * functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info points to an uninitialized (but
+ * allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should use ECORE_SP_SB_ID for SP Status block
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id);
+/**
+ * @brief ecore_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info initialized sb_info structure
+ */
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
+
+/**
+ * @brief ecore_int_sb_release - releases the sb_info structure.
+ *
+ * Once the structure is released, its memory can be freed.
+ *
+ * @param p_hwfn
+ * @param sb_info points to an allocated sb_info structure
+ * @param sb_id the sb_id to be used (zero based in driver)
+ * should never be equal to ECORE_SP_SB_ID
+ * (SP Status block)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_info *sb_info,
+ u16 sb_id);
+
+/**
+ * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
+ * default status block.
+ *
+ * @param hwfn_cookie - opaque pointer to the hwfn
+ *
+ */
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
+
+/**
+ * @brief ecore_int_get_num_sbs - get the number of status
+ * blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_sb_cnt_info
+ *
+ * @return
+ */
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_sb_cnt_info *p_sb_cnt_info);
+
+/**
+ * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
+ * release. The API needs to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
new file mode 100644
index 00000000..b34a9c6b
--- /dev/null
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -0,0 +1,933 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SRIOV_API_H__
+#define __ECORE_SRIOV_API_H__
+
+#include "ecore_status.h"
+
+#define ECORE_VF_ARRAY_LENGTH (3)
+
+#define IS_VF(p_dev) ((p_dev)->b_is_vf)
+#define IS_PF(p_dev) (!((p_dev)->b_is_vf))
+#ifdef CONFIG_ECORE_SRIOV
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->sriov_info.total_vfs))
+#else
+#define IS_PF_SRIOV(p_hwfn) (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
+
+/* @@@ TBD MichalK - what should this number be */
+#define ECORE_MAX_VF_CHAINS_PER_PF 16
+
+/* vport update extended feature tlvs flags */
+enum ecore_iov_vport_update_flag {
+ ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
+ ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
+ ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
+ ECORE_IOV_VP_UPDATE_MCAST = 3,
+ ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
+ ECORE_IOV_VP_UPDATE_RSS = 5,
+ ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
+ ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
+ ECORE_IOV_VP_UPDATE_MAX = 8,
+};
+
+struct ecore_mcp_link_params;
+struct ecore_mcp_link_state;
+struct ecore_mcp_link_capabilities;
+
+/* These defines are used by the hw-channel; should never change order */
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct ecore_vf_acquire_sw_info {
+ u32 driver_version;
+ u8 os_type;
+ bool override_fw_version;
+};
+
+struct ecore_public_vf_info {
+ /* These copies will later be reflected in the bulletin board,
+ * but this copy should be newer.
+ */
+ u8 forced_mac[ETH_ALEN];
+ u16 forced_vlan;
+};
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* This is SW channel related only... */
+enum mbx_state {
+ VF_PF_UNKNOWN_STATE = 0,
+ VF_PF_WAIT_FOR_START_REQUEST = 1,
+ VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
+ VF_PF_REQUEST_IN_PROCESSING = 3,
+ VF_PF_RESPONSE_READY = 4,
+};
+
+struct ecore_iov_sw_mbx {
+ enum mbx_state mbx_state;
+
+ u32 request_size;
+ u32 request_offset;
+
+ u32 response_size;
+ u32 response_offset;
+};
+
+/**
+ * @brief Get the vf sw mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return struct ecore_iov_sw_mbx*
+ */
+struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+#endif
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief mark/clear all VFs before/after an incoming PCIe sriov
+ * disable.
+ *
+ * @param p_hwfn
+ * @param to_disable
+ */
+void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable);
+
+/**
+ * @brief mark/clear chosen VFs before/after an incoming PCIe
+ * sriov disable.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param to_disable
+ */
+void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id, u8 to_disable);
+
+/**
+ *
+ * @brief ecore_iov_init_hw_for_vf - initialize the HW for
+ * enabling access of a VF. Also includes preparing the
+ * IGU for VF access. This needs to be called AFTER hw is
+ * initialized and BEFORE VF is loaded inside the VM.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ * @param num_rx_queues
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id, u16 num_rx_queues);
+
+/**
+ * @brief ecore_iov_process_mbx_req - process a request received
+ * from the VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, int vfid);
+
+/**
+ * @brief ecore_iov_release_hw_for_vf - called once the upper layer
+ *        knows the VF is done with - any resources allocated for
+ *        the VF can be released at this point. This must be done
+ *        once we know the VF is no longer loaded in the VM.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id);
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief ecore_iov_set_vf_ctx - set a context for a given VF
+ *
+ * @param p_hwfn
+ * @param vf_id
+ * @param ctx
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 vf_id, void *ctx);
+#endif
+
+/**
+ * @brief FLR cleanup for all VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief FLR cleanup for single VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 rel_vf_id);
+
+/**
+ * @brief Update the bulletin with link information. Notice this does NOT
+ * send a bulletin update, only updates the PF's bulletin.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param params - the link params to use for the VF link configuration
+ * @param link - the link output to use for the VF link configuration
+ * @param p_caps - the link default capabilities.
+ */
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps);
+
+/**
+ * @brief Returns link information as perceived by VF.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param p_params - the link params visible to vf.
+ * @param p_link - the link state visible to vf.
+ * @param p_caps - the link default capabilities visible to vf.
+ */
+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps);
+
+/**
+ * @brief return if the VF is pending FLR
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return bool
+ */
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Check if given VF ID @vfid is valid
+ * w.r.t. @b_enabled_only value
+ * if b_enabled_only = true - only enabled VF id is valid
+ * else any VF id less than max_vfs is valid
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ *
+ * @return bool - true for valid VF ID
+ */
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only);
+
+/**
+ * @brief Get VF's public info structure
+ *
+ * @param p_hwfn
+ * @param vfid - Relative VF ID
+ * @param b_enabled_only - false if want to access even if vf is disabled
+ *
+ * @return struct ecore_public_vf_info *
+ */
+struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
+ *p_hwfn, u16 vfid,
+ bool b_enabled_only);
+
+/**
+ * @brief Set pending events bitmap for given @vfid
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid);
+
+/**
+ * @brief Copy pending events bitmap in @events and clear
+ * original copy of events
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events);
+
+/**
+ * @brief Copy VF's message to PF's buffer
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param vfid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *ptt, int vfid);
+/**
+ * @brief Set forced MAC address in PFs copy of bulletin board
+ * and configure FW/HW to support the configuration.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid);
+
+/**
+ * @brief Set MAC address in PFs copy of bulletin board without
+ * configuring FW/HW.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid);
+
+/**
+ * @brief Set forced VLAN [pvid] in PFs copy of bulletin board
+ * and configure FW/HW to support the configuration.
+ * Setting of pvid 0 would clear the feature.
+ * @param p_hwfn
+ * @param pvid
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid);
+
+/**
+ * @brief Set the default behaviour of a VF when no VLANs are configured
+ *        for it - whether to accept only untagged traffic or all traffic.
+ *        Must be called prior to the VF's vport-start.
+ *
+ * @param p_hwfn
+ * @param b_untagged_only
+ * @param vfid
+ *
+ * @return ECORE_SUCCESS if the configuration took effect.
+ */
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+ bool b_untagged_only, int vfid);
+/**
+ * @brief Get the VF's opaque fid.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param opaque_fid
+ */
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 *opaque_fid);
+
+/**
+ * @brief Get the VF's VPORT id.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param p_vport_id
+ */
+void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
+ u8 *p_vport_id);
+
+/**
+ * @brief Check if VF has VPORT instance. This can be used
+ * to check if VPORT is active.
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief PF posts the bulletin to the VF
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+ int vfid,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Check if given VF (@vfid) is marked as stopped
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool : true if stopped
+ */
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Configure VF anti spoofing
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param val - spoofchk value - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ int vfid, bool val);
+
+/**
+ * @brief Get VF's configured spoof value.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - spoofchk value - true/false
+ */
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Check for SRIOV sanity by PF.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - true if sanity checks pass, else false
+ */
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Get the num of VF chains.
+ *
+ * @param p_hwfn
+ *
+ * @return u8
+ */
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Get VF request mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_req_virt_addr
+ * @param p_req_virt_size
+ */
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_req_virt_addr,
+ u16 *p_req_virt_size);
+
+/**
+ * @brief Get VF reply mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_reply_virt_addr
+ * @param p_reply_virt_size
+ */
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_reply_virt_addr,
+ u16 *p_reply_virt_size);
+
+/**
+ * @brief Validate if the given length is a valid vfpf message
+ * length
+ *
+ * @param length
+ *
+ * @return bool
+ */
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
+
+/**
+ * @brief Return the max pfvf message length
+ *
+ * @return u32
+ */
+u32 ecore_iov_pfvf_msg_length(void);
+
+/**
+ * @brief Returns forced MAC address if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return OSAL_NULL if the MAC isn't forced; otherwise, the forced MAC.
+ */
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Returns pvid if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return 0 if no pvid is configured, otherwise the pvid.
+ */
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+/**
+ * @brief Configure the VF's tx rate
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param val - tx rate value in Mb/sec.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid, int val);
+
+/**
+ * @brief - Retrieves the statistics associated with a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param p_stats - this will be filled with the VF statistics
+ *
+ * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
+ */
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid,
+ struct ecore_eth_stats *p_stats);
+
+/**
+ * @brief - Retrieves num of rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return num of rxqs chains.
+ */
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves num of active rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return num of active rxqs chains.
+ */
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves ctx pointer
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return the VF's ctx pointer.
+ */
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves the VF's num of SBs
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return num of SBs.
+ */
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is waiting for acquire
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return bool
+ */
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired but not initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return bool
+ */
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired and initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return bool
+ */
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return - rate in Mbps
+ */
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief - Configure min rate for VF's vport.
+ * @param p_dev
+ * @param vfid
+ * @param rate - rate in Mbps
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+ int vfid, u32 rate);
+#else
+static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn,
+ u8 to_disable)
+{
+}
+
+static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ u8 to_disable)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 rel_vf_id, u16 num_rx_queues)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 rel_vf_id)
+{
+ return ECORE_SUCCESS;
+}
+
+#ifndef LINUX_REMOVE
+static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn
+ *p_hwfn, u16 vf_id,
+ void *ctx)
+{
+ return ECORE_INVAL;
+}
+#endif
+static OSAL_INLINE enum _ecore_status_t
+ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities
+ *p_caps)
+{
+}
+
+static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities
+ *p_caps)
+{
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return false;
+}
+
+static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only)
+{
+ return false;
+}
+
+static OSAL_INLINE struct ecore_public_vf_info *
+ ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid,
+ bool b_enabled_only)
+{
+ return OSAL_NULL;
+}
+
+static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn
+ *p_hwfn, u8 vfid)
+{
+}
+
+static OSAL_INLINE void
+ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn, struct ecore_ptt *ptt,
+ int vfid)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn
+ *p_hwfn, u8 *mac,
+ int vfid)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, u8 *mac, int vfid)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE void
+ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, u16 pvid,
+ int vfid)
+{
+}
+
+static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn,
+ int vfid, u16 *opaque_fid)
+{
+}
+
+static OSAL_INLINE void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn,
+ int vfid, u8 *p_vport_id)
+{
+}
+
+static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn
+ *p_hwfn, int vfid)
+{
+ return false;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn, int vfid,
+ struct ecore_ptt *p_ptt)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn,
+ int vfid)
+{
+ return false;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, int vfid, bool val)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn,
+ int vfid)
+{
+ return false;
+}
+
+static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn,
+ int vfid)
+{
+ return false;
+}
+
+static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static OSAL_INLINE void
+ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn, u16 rel_vf_id,
+ void **pp_req_virt_addr,
+ u16 *p_req_virt_size)
+{
+}
+
+static OSAL_INLINE void
+ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_reply_virt_addr,
+ u16 *p_reply_virt_size)
+{
+}
+
+static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
+{
+ return false;
+}
+
+static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void)
+{
+ return 0;
+}
+
+static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn
+ *p_hwfn, u16 rel_vf_id)
+{
+ return OSAL_NULL;
+}
+
+static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn
+ *p_hwfn,
+ u16 rel_vf_id)
+{
+ return 0;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ int vfid, int val)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return 0;
+}
+
+static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn
+ *p_hwfn, u16 rel_vf_id)
+{
+ return 0;
+}
+
+static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return OSAL_NULL;
+}
+
+static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return 0;
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn
+ *p_hwfn, u16 rel_vf_id)
+{
+ return false;
+}
+
+static OSAL_INLINE bool
+ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return false;
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return false;
+}
+
+static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn,
+ int vfid)
+{
+ return 0;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(
+ struct ecore_dev *p_dev, int vfid, u32 rate)
+{
+ return ECORE_INVAL;
+}
+#endif
+#endif
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
new file mode 100644
index 00000000..dd53ea9c
--- /dev/null
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __IRO_H__
+#define __IRO_H__
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+(IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+(IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+(IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+(IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
+(IRO[6].base + ((global_queue_id) * IRO[6].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[17].base + ((stat_counter_id) * IRO[17].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id) \
+(IRO[18].base + ((queue_id) * IRO[18].m1))
+#define MSTORM_PRODS_SIZE (IRO[18].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[20].base + ((stat_counter_id) * IRO[20].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+(IRO[21].base + ((queue_id) * IRO[21].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+(IRO[24].base + ((pf_id) * IRO[24].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+(IRO[25].base + ((queue_id) * IRO[25].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+(IRO[26].base + ((rss_id) * IRO[26].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+(IRO[27].base + ((rss_id) * IRO[27].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+(IRO[28].base + ((pf_id) * IRO[28].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+(IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+(IRO[30].base + ((func_id) * IRO[30].m1) + ((bdq_id) * IRO[30].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[30].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
+(IRO[31].base + ((rq_queue_id) * IRO[31].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[31].size)
+/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+(IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
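+
+/* Illustrative note: each accessor above computes a RAM offset as
+ * base + index * stride (plus a second index * stride for the
+ * two-parameter macros), so e.g. MSTORM_PRODS_OFFSET(3) expands to
+ * IRO[18].base + 3 * IRO[18].m1, and the matching *_SIZE macro is
+ * presumably the size of one such element.
+ */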
+
+#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
new file mode 100644
index 00000000..c818b580
--- /dev/null
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __IRO_VALUES_H__
+#define __IRO_VALUES_H__
+
+static const struct iro iro_arr[44] = {
+ {0x0, 0x0, 0x0, 0x0, 0x8},
+ {0x4db0, 0x60, 0x0, 0x0, 0x60},
+ {0x6418, 0x20, 0x0, 0x0, 0x20},
+ {0x500, 0x8, 0x0, 0x0, 0x4},
+ {0x480, 0x8, 0x0, 0x0, 0x4},
+ {0x0, 0x8, 0x0, 0x0, 0x2},
+ {0x80, 0x8, 0x0, 0x0, 0x2},
+ {0x4938, 0x0, 0x0, 0x0, 0x78},
+ {0x3df0, 0x0, 0x0, 0x0, 0x78},
+ {0x29b0, 0x0, 0x0, 0x0, 0x78},
+ {0x4d38, 0x0, 0x0, 0x0, 0x78},
+ {0x56c8, 0x0, 0x0, 0x0, 0x78},
+ {0x7e48, 0x0, 0x0, 0x0, 0x78},
+ {0xa28, 0x8, 0x0, 0x0, 0x8},
+ {0x61f8, 0x10, 0x0, 0x0, 0x10},
+ {0xb500, 0x30, 0x0, 0x0, 0x30},
+ {0x95b8, 0x30, 0x0, 0x0, 0x30},
+ {0x5898, 0x40, 0x0, 0x0, 0x40},
+ {0x1f8, 0x10, 0x0, 0x0, 0x8},
+ {0xa228, 0x0, 0x0, 0x0, 0x4},
+ {0x8050, 0x40, 0x0, 0x0, 0x30},
+ {0xcf8, 0x8, 0x0, 0x0, 0x8},
+ {0x2b48, 0x80, 0x0, 0x0, 0x38},
+ {0xadf0, 0x0, 0x0, 0x0, 0xf0},
+ {0xaee0, 0x8, 0x0, 0x0, 0x8},
+ {0x80, 0x8, 0x0, 0x0, 0x8},
+ {0xac0, 0x8, 0x0, 0x0, 0x8},
+ {0x2578, 0x8, 0x0, 0x0, 0x8},
+ {0x24f8, 0x8, 0x0, 0x0, 0x8},
+ {0x0, 0x8, 0x0, 0x0, 0x8},
+ {0x200, 0x10, 0x8, 0x0, 0x8},
+ {0x17f8, 0x8, 0x0, 0x0, 0x2},
+ {0x19f8, 0x10, 0x8, 0x0, 0x2},
+ {0xd988, 0x38, 0x0, 0x0, 0x24},
+ {0x11040, 0x10, 0x0, 0x0, 0x8},
+ {0x11670, 0x38, 0x0, 0x0, 0x18},
+ {0xaeb8, 0x30, 0x0, 0x0, 0x10},
+ {0x86f8, 0x28, 0x0, 0x0, 0x18},
+ {0xebf8, 0x10, 0x0, 0x0, 0x10},
+ {0xde08, 0x40, 0x0, 0x0, 0x30},
+ {0x121a0, 0x38, 0x0, 0x0, 0x8},
+ {0xf060, 0x20, 0x0, 0x0, 0x20},
+ {0x2b80, 0x80, 0x0, 0x0, 0x10},
+ {0x50a0, 0x10, 0x0, 0x0, 0x10},
+};
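+
+/* Layout note (inferred from the accessors in ecore_iro.h, which only
+ * reference .base, .m1, .m2 and .size): each row above is presumably
+ * { base, m1, m2, m3, size }, indexed by the IRO[n] subscripts used there.
+ */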
+
+#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
new file mode 100644
index 00000000..9e6ef5a4
--- /dev/null
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -0,0 +1,1807 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_l2.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_vf.h"
+#include "ecore_sriov.h"
+#include "ecore_mcp.h"
+
+#define ECORE_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params)
+{
+ struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+ u8 abs_vport_id = 0;
+ u16 rx_mode = 0;
+
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+ p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+ p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
+ p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
+
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+ p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
+
+ /* TPA related fields */
+ OSAL_MEMSET(&p_ramrod->tpa_param, 0,
+ sizeof(struct eth_vport_tpa_param));
+ p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+
+ switch (p_params->tpa_mode) {
+ case ECORE_TPA_MODE_GRO:
+ p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ p_ramrod->tpa_param.tpa_max_size = (u16)-1;
+ p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+ p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
+ p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+ break;
+ default:
+ break;
+ }
+
+ p_ramrod->tx_switching_en = p_params->tx_switching;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ p_ramrod->tx_switching_en = 0;
+#endif
+
+ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+ p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
+ p_params->concrete_fid);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params)
+{
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+ p_params->mtu,
+ p_params->remove_inner_vlan,
+ p_params->tpa_mode,
+ p_params->max_buffers_per_cqe,
+ p_params->only_untagged);
+
+ return ecore_sp_eth_vport_start(p_hwfn, p_params);
+}
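+
+/* A minimal caller sketch, under assumptions: the vport ID and MTU below
+ * are examples only, and concrete_fid is assumed to live in hw_info
+ * alongside the opaque_fid used elsewhere in this file:
+ *
+ *	struct ecore_sp_vport_start_params start_params;
+ *
+ *	OSAL_MEMSET(&start_params, 0, sizeof(start_params));
+ *	start_params.vport_id = 0;
+ *	start_params.mtu = 1500;
+ *	start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ *	start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ *	rc = ecore_sp_vport_start(p_hwfn, &start_params);
+ */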
+
+static enum _ecore_status_t
+ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_rss_params *p_rss)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct eth_vport_rss_config *p_config;
+ u16 abs_l2_queue = 0;
+ int i;
+
+ if (!p_rss) {
+ p_ramrod->common.update_rss_flg = 0;
+ return rc;
+ }
+ p_config = &p_ramrod->rss_config;
+
+ OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
+ ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+ rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+ p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+ p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+ p_config->update_rss_key = p_rss->update_rss_key;
+
+ p_config->rss_mode = p_rss->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;
+
+ p_config->capabilities = 0;
+
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
+ SET_FIELD(p_config->capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+ !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
+ p_config->tbl_size = p_rss->rss_table_size_log;
+ p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+ p_ramrod->common.update_rss_flg,
+ p_config->rss_mode,
+ p_config->update_rss_capabilities,
+ p_config->capabilities,
+ p_config->update_rss_ind_table, p_config->update_rss_key);
+
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ rc = ecore_fw_l2_queue(p_hwfn,
+ (u8)p_rss->rss_ind_table[i],
+ &abs_l2_queue);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n",
+ i, p_config->indirection_table[i]);
+ }
+
+ for (i = 0; i < 10; i++)
+ p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);
+
+ return rc;
+}
+
+static void
+ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_filter_accept_flags flags)
+{
+ p_ramrod->common.update_rx_mode_flg = flags.update_rx_mode_config;
+ p_ramrod->common.update_tx_mode_flg = flags.update_tx_mode_config;
+
+#ifndef ASIC_ONLY
+ /* On B0 emulation we cannot enable Tx, since this would cause writes
+ * to PVFC HW block which isn't implemented in emulation.
+ */
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Non-Asic - prevent Tx mode in vport update\n");
+ p_ramrod->common.update_tx_mode_flg = 0;
+ }
+#endif
+
+ /* Set Rx mode accept flags */
+ if (p_ramrod->common.update_rx_mode_flg) {
+ __le16 *state = &p_ramrod->rx_mode.state;
+ u8 accept_filter = flags.rx_accept_filter;
+
+/*
+ * SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ * !!(accept_filter & ECORE_ACCEPT_NONE));
+ */
+
+ SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));
+/*
+ * SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ * !!(accept_filter & ECORE_ACCEPT_NONE));
+ */
+ SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(*state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "p_ramrod->rx_mode.state = 0x%x\n",
+ p_ramrod->rx_mode.state);
+ }
+
+ /* Set Tx mode accept flags */
+ if (p_ramrod->common.update_tx_mode_flg) {
+ __le16 *state = &p_ramrod->tx_mode.state;
+ u8 accept_filter = flags.tx_accept_filter;
+
+ SET_FIELD(*state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ !!(accept_filter & ECORE_ACCEPT_NONE));
+
+ SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ !!(accept_filter & ECORE_ACCEPT_NONE));
+
+ SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(*state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & ECORE_ACCEPT_BCAST));
+ /* @DPDK */
+ /* ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL and
+ * ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL
+ * needs to be set for VF-VF communication to work
+ * when dest macaddr is unknown.
+ */
+ SET_FIELD(*state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "p_ramrod->tx_mode.state = 0x%x\n",
+ p_ramrod->tx_mode.state);
+ }
+}
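+
+/* For reference, the resulting unicast rx-mode per accept_filter bit
+ * combination (read directly off the SET_FIELD logic above):
+ *
+ *   MATCHED  UNMATCHED  ->  DROP_ALL  ACCEPT_ALL  ACCEPT_UNMATCHED
+ *      0         0             1          0              0
+ *      1         0             0          0              0
+ *      0         1             0          0              1
+ *      1         1             0          1              1
+ */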
+
+static void
+ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_sge_tpa_params *p_params)
+{
+ struct eth_vport_tpa_param *p_tpa;
+
+ if (!p_params) {
+ p_ramrod->common.update_tpa_en_flg = 0;
+ p_ramrod->common.update_tpa_param_flg = 0;
+ return;
+ }
+
+ p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+ p_tpa = &p_ramrod->tpa_param;
+ p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+ p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+ p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+ p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+ p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+ p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+ p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+ p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+ p_tpa->tpa_max_size = p_params->tpa_max_size;
+ p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
+ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct ecore_sp_vport_update_params *p_params)
+{
+ int i;
+
+ OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+
+ if (!p_params->update_approx_mcast_flg)
+ return;
+
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = (u32 *)p_params->bins;
+
+ p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+ }
+}
+
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+ u8 abs_vport_id = 0, val;
+ u16 wordval;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
+ return rc;
+ }
+
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Copy input params to ramrod according to FW struct */
+ p_ramrod = &p_ent->ramrod.vport_update;
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ p_ramrod->common.rx_active_flg = p_params->vport_active_rx_flg;
+ p_ramrod->common.tx_active_flg = p_params->vport_active_tx_flg;
+ val = p_params->update_vport_active_rx_flg;
+ p_ramrod->common.update_rx_active_flg = val;
+ val = p_params->update_vport_active_tx_flg;
+ p_ramrod->common.update_tx_active_flg = val;
+ val = p_params->update_inner_vlan_removal_flg;
+ p_ramrod->common.update_inner_vlan_removal_en_flg = val;
+ val = p_params->inner_vlan_removal_flg;
+ p_ramrod->common.inner_vlan_removal_en = val;
+ val = p_params->silent_vlan_removal_flg;
+ p_ramrod->common.silent_vlan_removal_en = val;
+ val = p_params->update_tx_switching_flg;
+ p_ramrod->common.update_tx_switching_en_flg = val;
+ val = p_params->update_default_vlan_enable_flg;
+ p_ramrod->common.update_default_vlan_en_flg = val;
+ p_ramrod->common.default_vlan_en = p_params->default_vlan_enable_flg;
+ val = p_params->update_default_vlan_flg;
+ p_ramrod->common.update_default_vlan_flg = val;
+ wordval = p_params->default_vlan;
+ p_ramrod->common.default_vlan = OSAL_CPU_TO_LE16(wordval);
+
+ p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ if (p_ramrod->common.tx_switching_en ||
+ p_ramrod->common.update_tx_switching_en_flg) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA - why are we seeing tx-switching? Overriding it\n");
+ p_ramrod->common.tx_switching_en = 0;
+ p_ramrod->common.update_tx_switching_en_flg = 1;
+ }
+#endif
+
+ val = p_params->update_anti_spoofing_en_flg;
+ p_ramrod->common.update_anti_spoofing_en_flg = val;
+ p_ramrod->common.anti_spoofing_en = p_params->anti_spoofing_en;
+ p_ramrod->common.accept_any_vlan = p_params->accept_any_vlan;
+ val = p_params->update_accept_any_vlan_flg;
+ p_ramrod->common.update_accept_any_vlan_flg = val;
+
+ rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+ if (rc != ECORE_SUCCESS) {
+ /* Return spq entry which is taken in ecore_sp_init_request() */
+ ecore_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+
+ /* Update mcast bins for VFs, PF doesn't use this functionality */
+ ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+ ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
+ p_params->sge_tpa_params);
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u8 vport_id)
+{
+ struct vport_stop_ramrod_data *p_ramrod;
+ struct ecore_sp_init_data init_data;
+ struct ecore_spq_entry *p_ent;
+ enum _ecore_status_t rc;
+ u8 abs_vport_id = 0;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_vport_stop(p_hwfn);
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_stop;
+ p_ramrod->vport_id = abs_vport_id;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t
+ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_accept_flags *p_accept_flags)
+{
+ struct ecore_sp_vport_update_params s_params;
+
+ OSAL_MEMSET(&s_params, 0, sizeof(s_params));
+ OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
+ sizeof(struct ecore_filter_accept_flags));
+
+ return ecore_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
+enum _ecore_status_t
+ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+ u8 vport,
+ struct ecore_filter_accept_flags accept_flags,
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_sp_vport_update_params update_params;
+ int i, rc;
+
+ /* Prepare and send the vport rx_mode change */
+ OSAL_MEMSET(&update_params, 0, sizeof(update_params));
+ update_params.vport_id = vport;
+ update_params.accept_flags = accept_flags;
+ update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+ update_params.accept_any_vlan = accept_any_vlan;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (IS_VF(p_dev)) {
+ rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ continue;
+ }
+
+ rc = ecore_sp_vport_update(p_hwfn, &update_params,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+ accept_flags.rx_accept_filter,
+ accept_flags.tx_accept_filter);
+
+ if (update_accept_any_vlan)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "accept_any_vlan=%d configured\n",
+ accept_any_vlan);
+ }
+
+ return 0;
+}
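+
+/* A usage sketch (the flag combination is an example, not a
+ * recommendation): accepting all unicast plus broadcast on vport 0
+ * could look like
+ *
+ *	struct ecore_filter_accept_flags flags;
+ *
+ *	OSAL_MEMSET(&flags, 0, sizeof(flags));
+ *	flags.update_rx_mode_config = 1;
+ *	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ *				 ECORE_ACCEPT_UCAST_UNMATCHED |
+ *				 ECORE_ACCEPT_BCAST;
+ *	rc = ecore_filter_accept_cmd(p_dev, 0, flags, 0, 0,
+ *				     ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
+ */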
+
+static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_cid_data *p_cid_data)
+{
+ if (!p_cid_data->b_cid_allocated)
+ return;
+
+ ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
+ p_cid_data->b_cid_allocated = false;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ u16 rx_queue_id,
+ u8 vport_id,
+ u8 stats_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+{
+ struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+
+ /* Store information for the stop */
+ p_rx_cid->cid = cid;
+ p_rx_cid->opaque_fid = opaque_fid;
+ p_rx_cid->vport_id = vport_id;
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, cid, rx_queue_id, vport_id, sb);
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = cid;
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
+ p_ramrod->sb_index = sb_index;
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
+
+ p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u8 rx_queue_id,
+ u8 vport_id,
+ u8 stats_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod)
+{
+ struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ u8 abs_stats_id = 0;
+ u16 abs_l2_queue = 0;
+ enum _ecore_status_t rc;
+ u64 init_prod_val = 0;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ return ecore_vf_pf_rxq_start(p_hwfn,
+ rx_queue_id,
+ sb,
+ sb_index,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size, pp_prod);
+ }
+
+ rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)(&init_prod_val));
+
+ /* Allocate a CID for the queue */
+ rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_rx_cid->b_cid_allocated = true;
+
+ rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
+ opaque_fid,
+ p_rx_cid->cid,
+ rx_queue_id,
+ vport_id,
+ abs_stats_id,
+ sb,
+ sb_index,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+
+ if (rc != ECORE_SUCCESS)
+ ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+ struct ecore_hw_cid_data *p_rx_cid;
+ u16 qid, abs_rx_q_id = 0;
+ u8 i;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_rxqs_update(p_hwfn,
+ rx_queue_id,
+ num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg);
+
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ for (i = 0; i < num_rxqs; i++) {
+ qid = rx_queue_id + i;
+ p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+ /* Get SPQ entry */
+ init_data.cid = p_rx_cid->cid;
+ init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+ ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = complete_cqe_flg;
+ p_ramrod->complete_event_flg = complete_event_flg;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion)
+{
+ struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
+ cqe_completion);
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_rx_cid->cid;
+ init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+ ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+ /* Cleaning the queue requires the completion to arrive there.
+ * In addition, VFs require the answer to arrive as an EQE to the PF.
+ */
+ p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
+ p_hwfn->hw_info.opaque_fid) &&
+ !eq_completion_only) || cqe_completion;
+ p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
+ p_hwfn->hw_info.opaque_fid) ||
+ eq_completion_only;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u16 tx_queue_id,
+ u32 cid,
+ u8 vport_id,
+ u8 stats_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union ecore_qm_pq_params *p_pq_params)
+{
+ struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+ u16 pq_id, abs_tx_q_id = 0;
+ u8 abs_vport_id;
+
+ /* Store information for the stop */
+ p_tx_cid->cid = cid;
+ p_tx_cid->opaque_fid = opaque_fid;
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, tx_queue_id, &abs_tx_q_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = cid;
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
+ p_ramrod->sb_index = sb_index;
+ p_ramrod->stats_counter_id = stats_id;
+
+ p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
+
+ p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
+ p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
+ p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+
+ pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
+ p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u16 tx_queue_id,
+ u8 vport_id,
+ u8 stats_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell)
+{
+ struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ union ecore_qm_pq_params pq_params;
+ enum _ecore_status_t rc;
+ u8 abs_stats_id = 0;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ return ecore_vf_pf_txq_start(p_hwfn,
+ tx_queue_id,
+ sb,
+ sb_index,
+ pbl_addr, pbl_size, pp_doorbell);
+ }
+
+ rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
+ OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+
+ /* Allocate a CID for the queue */
+ rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+ return rc;
+ }
+ p_tx_cid->b_cid_allocated = true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ opaque_fid, p_tx_cid->cid, tx_queue_id, vport_id, sb);
+
+ /* TODO - set tc in the pq_params for multi-cos */
+ rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
+ opaque_fid,
+ tx_queue_id,
+ p_tx_cid->cid,
+ vport_id,
+ abs_stats_id,
+ sb,
+ sb_index,
+ pbl_addr, pbl_size, &pq_params);
+
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+ if (rc != ECORE_SUCCESS)
+ ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
+{
+ return ECORE_NOTIMPL;
+}
+
+enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ u16 tx_queue_id)
+{
+ struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ struct tx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_tx_cid->cid;
+ init_data.opaque_fid = p_tx_cid->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_stop;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ return rc;
+}
+
+static enum eth_filter_action
+ecore_filter_action(enum ecore_filter_opcode opcode)
+{
+ enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+ switch (opcode) {
+ case ECORE_FILTER_ADD:
+ action = ETH_FILTER_ACTION_ADD;
+ break;
+ case ECORE_FILTER_REMOVE:
+ action = ETH_FILTER_ACTION_REMOVE;
+ break;
+ case ECORE_FILTER_FLUSH:
+ action = ETH_FILTER_ACTION_REMOVE_ALL;
+ break;
+ default:
+ action = MAX_ETH_FILTER_ACTION;
+ }
+
+ return action;
+}
+
+static void ecore_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
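+
+/* Worked example: for mac = aa:bb:cc:dd:ee:ff the firmware fields end up
+ * holding the byte pairs msb = {bb,aa}, mid = {dd,cc}, lsb = {ff,ee},
+ * i.e. each __le16 reads back as 0xaabb, 0xccdd and 0xeeff respectively.
+ */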
+
+static enum _ecore_status_t
+ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ struct vport_filter_update_ramrod_data **pp_ramrod,
+ struct ecore_spq_entry **pp_ent,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod;
+ u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct eth_filter_cmd *p_first_filter;
+ struct eth_filter_cmd *p_second_filter;
+ struct ecore_sp_init_data init_data;
+ enum eth_filter_action action;
+ enum _ecore_status_t rc;
+
+ rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &vport_to_remove_from);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &vport_to_add_to);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, pp_ent,
+ ETH_RAMROD_FILTERS_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+ p_ramrod = *pp_ramrod;
+ p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+ p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Non-Asic - prevent Tx filters\n");
+ p_ramrod->filter_cmd_hdr.tx = 0;
+ }
+#endif
+
+ switch (p_filter_cmd->opcode) {
+ case ECORE_FILTER_REPLACE:
+ case ECORE_FILTER_MOVE:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
+ break;
+ default:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
+ break;
+ }
+
+ p_first_filter = &p_ramrod->filter_cmds[0];
+ p_second_filter = &p_ramrod->filter_cmds[1];
+
+ switch (p_filter_cmd->type) {
+ case ECORE_FILTER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC;
+ break;
+ case ECORE_FILTER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_VLAN;
+ break;
+ case ECORE_FILTER_MAC_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_PAIR;
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
+ break;
+ case ECORE_FILTER_VNI:
+ p_first_filter->type = ETH_FILTER_TYPE_VNI;
+ break;
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
+ ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
+ &p_first_filter->mac_mid,
+ &p_first_filter->mac_lsb,
+ (u8 *)p_filter_cmd->mac);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+ p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+ p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
+
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+ p_first_filter->vport_id = vport_to_remove_from;
+
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
+ } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
+ p_first_filter->vport_id = vport_to_add_to;
+ OSAL_MEMCPY(p_second_filter, p_first_filter,
+ sizeof(*p_second_filter));
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ } else {
+ action = ecore_filter_action(p_filter_cmd->opcode);
+
+ if (action == MAX_ETH_FILTER_ACTION) {
+ DP_NOTICE(p_hwfn, true,
+ "%d is not supported yet\n",
+ p_filter_cmd->opcode);
+ return ECORE_NOTIMPL;
+ }
+
+ p_first_filter->action = action;
+ p_first_filter->vport_id =
+ (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+ vport_to_remove_from : vport_to_add_to;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct eth_filter_cmd_header *p_header;
+ enum _ecore_status_t rc;
+
+ rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+ &p_ramrod, &p_ent,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+ return rc;
+ }
+ p_header = &p_ramrod->filter_cmd_hdr;
+ p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+ (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
+ ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+ "REMOVE" :
+ ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
+ "MOVE" : "REPLACE")),
+ (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
+ ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
+ "VLAN" : "MAC & VLAN"),
+ p_ramrod->filter_cmd_hdr.cmd_cnt,
+ p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+ p_filter_cmd->vport_to_add_to,
+ p_filter_cmd->vport_to_remove_from,
+ p_filter_cmd->mac[0], p_filter_cmd->mac[1],
+ p_filter_cmd->mac[2], p_filter_cmd->mac[3],
+ p_filter_cmd->mac[4], p_filter_cmd->mac[5],
+ p_filter_cmd->vlan);
+
+ return ECORE_SUCCESS;
+}
+
+/*******************************************************************************
+ * Description:
+ * Calculates CRC-32C over a buffer
+ * Note: crc32_length MUST be a multiple of 8
+ * Return: the computed CRC, or crc32_seed unchanged on invalid input
+ ******************************************************************************/
+static u32 ecore_calc_crc32c(u8 *crc32_packet,
+ u32 crc32_length, u32 crc32_seed, u8 complement)
+{
+ u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+ u8 msb = 0, current_byte = 0;
+
+ if ((crc32_packet == OSAL_NULL) ||
+ (crc32_length == 0) || ((crc32_length % 8) != 0)) {
+ return crc32_result;
+ }
+
+ for (byte = 0; byte < crc32_length; byte++) {
+ current_byte = crc32_packet[byte];
+ for (bit = 0; bit < 8; bit++) {
+ msb = (u8)(crc32_result >> 31);
+ crc32_result = crc32_result << 1;
+ if (msb != (0x1 & (current_byte >> bit))) {
+ crc32_result = crc32_result ^ CRC32_POLY;
+ crc32_result |= 1;
+ }
+ }
+ }
+
+ return crc32_result;
+}
+
+static OSAL_INLINE u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+{
+ u32 packet_buf[2] = { 0 };
+
+ OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
+ return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
+u8 ecore_mcast_bin_from_mac(u8 *mac)
+{
+ u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+ mac, ETH_ALEN);
+
+ return crc & 0xff;
+}
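+
+/* Illustrative sketch (not part of the driver): deriving the
+ * approximate-multicast bin for a MAC and marking it in the bin vector,
+ * mirroring what ecore_sp_eth_filter_mcast() below does per address.
+ * The helper name is hypothetical.
+ */
+#if 0 /* documentation-only example */
+static void example_mark_mcast_bin(unsigned long *bins, u8 *mac)
+{
+ /* CRC32c runs over the 6 MAC bytes zero-padded to 8 (the CRC
+ * helper requires a multiple of 8 bytes); the low byte of the
+ * result selects one of 256 bins.
+ */
+ u32 bit = ecore_mcast_bin_from_mac(mac);
+
+ OSAL_SET_BIT(bit, bins);
+}
+#endif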
+
+static enum _ecore_status_t
+ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc;
+ u8 abs_vport_id = 0;
+ int i;
+
+ rc = ecore_fw_vport(p_hwfn,
+ (p_filter_cmd->opcode == ECORE_FILTER_ADD) ?
+ p_filter_cmd->vport_to_add_to :
+ p_filter_cmd->vport_to_remove_from, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_ramrod->common.update_approx_mcast_flg = 1;
+
+ /* explicitly clear out the entire vector */
+ OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
+ 0, sizeof(p_ramrod->approx_mcast.bins));
+ OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+ /* The filter ADD op is an explicit set operation; it removes
+ * any existing filters for the vport.
+ */
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ OSAL_SET_BIT(bit, bins);
+ }
+
+ /* Convert to the correct endianness */
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ struct vport_update_ramrod_mcast *p_ramrod_bins;
+ u32 *p_bins = (u32 *)bins;
+
+ p_ramrod_bins = &p_ramrod->approx_mcast;
+ p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+ }
+ }
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ /* Only ADD and REMOVE operations are supported for multicast */
+ if (((p_filter_cmd->opcode != ECORE_FILTER_ADD) &&
+ (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
+ (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
+ return ECORE_INVAL;
+ }
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (IS_VF(p_dev)) {
+ ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+ continue;
+ }
+
+ rc = ecore_sp_eth_filter_mcast(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_filter_cmd,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (IS_VF(p_dev)) {
+ rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+ continue;
+ }
+
+ rc = ecore_sp_eth_filter_ucast(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_filter_cmd,
+ comp_mode, p_comp_data);
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+/* IOV related */
+enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(opaque_vfid);
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ return ECORE_INVAL;
+ }
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn)
+{
+ return ECORE_NOTIMPL;
+}
+
+enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* Statistics related code */
+static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+ }
+}
+
+static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_pstorm_per_queue_stat pstats;
+ u32 pstats_addr = 0, pstats_len = 0;
+
+ __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
+static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ tstats_len = sizeof(struct tstorm_per_port_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+ tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+ }
+
+ OSAL_MEMSET(&tstats, 0, sizeof(tstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
+
+ p_stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+ *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+ }
+}
+
+static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_ustorm_per_queue_stat ustats;
+ u32 ustats_addr = 0, ustats_len = 0;
+
+ __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
+ u32 *p_addr, u32 *p_len,
+ u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->p_dev)) {
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ } else {
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+ }
+}
+
+static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_mstorm_per_queue_stat mstats;
+ u32 mstats_addr = 0, mstats_len = 0;
+
+ __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+ statistics_bin);
+
+ OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+ ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
+
+ p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *p_stats)
+{
+ struct port_stats port_stats;
+ int j;
+
+ OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
+
+ ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, stats),
+ sizeof(port_stats));
+
+ p_stats->rx_64_byte_packets += port_stats.pmm.r64;
+ p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
+ p_stats->rx_crc_errors += port_stats.pmm.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+ p_stats->rx_pause_frames += port_stats.pmm.rxpf;
+ p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
+ p_stats->rx_align_errors += port_stats.pmm.raln;
+ p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
+ p_stats->rx_oversize_packets += port_stats.pmm.rovr;
+ p_stats->rx_jabbers += port_stats.pmm.rjbr;
+ p_stats->rx_undersize_packets += port_stats.pmm.rund;
+ p_stats->rx_fragments += port_stats.pmm.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.pmm.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+ p_stats->tx_pause_frames += port_stats.pmm.txpf;
+ p_stats->tx_pfc_frames += port_stats.pmm.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+ p_stats->tx_total_collisions += port_stats.pmm.tncl;
+ p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+ p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+ for (j = 0; j < 8; j++) {
+ p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+}
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *stats,
+ u16 statistics_bin, bool b_get_port_stats)
+{
+ __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+#ifndef ASIC_ONLY
+ /* Avoid getting PORT stats for emulation. */
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ return;
+#endif
+
+ if (b_get_port_stats && p_hwfn->mcp_info)
+ __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats)
+{
+ u8 fw_vport = 0;
+ int i;
+
+ OSAL_MEMSET(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+ ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+
+ if (IS_PF(p_dev)) {
+ /* The main vport uses relative index 0; translate to absolute */
+ if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ goto out;
+ }
+ }
+
+ if (IS_PF(p_dev) && !p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+ IS_PF(p_dev));
+
+out:
+ if (IS_PF(p_dev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+}
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats)
+{
+ u32 i;
+
+ if (!p_dev) {
+ OSAL_MEMSET(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ _ecore_get_vport_stats(p_dev, stats);
+
+ if (!p_dev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
+ for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
+}
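+
+/* Illustrative sketch (not part of the driver): the baseline reduction
+ * above treats struct ecore_eth_stats as a flat array of u64 counters,
+ * so a snapshot taken at reset time can be subtracted field-for-field.
+ * A hypothetical caller-side delta follows the same pattern:
+ */
+#if 0 /* documentation-only example */
+static void example_stats_delta(struct ecore_eth_stats *now,
+ struct ecore_eth_stats *base,
+ struct ecore_eth_stats *delta)
+{
+ u32 i;
+
+ for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
+ ((u64 *)delta)[i] = ((u64 *)now)[i] - ((u64 *)base)[i];
+}
+#endif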
+
+/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
+void ecore_reset_vport_stats(struct ecore_dev *p_dev)
+{
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+ ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+ u32 addr = 0, len = 0;
+
+ if (IS_PF(p_dev) && !p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+ __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+ OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+ __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+ OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+ __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+ if (IS_PF(p_dev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!p_dev->reset_stats)
+ DP_INFO(p_dev, "Reset stats not allocated\n");
+ else
+ _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
+}
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
new file mode 100644
index 00000000..b0850ca4
--- /dev/null
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_L2_H__
+#define __ECORE_L2_H__
+
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_spq.h"
+#include "ecore_l2_api.h"
+
+/**
+ * @brief ecore_sp_vf_start - VF Function Start
+ *
+ * This ramrod is sent when a virtual function (VF) is loaded, in order
+ * to initialize it. It configures the function-related parameters.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param concrete_vfid VF ID
+ * @param opaque_vfid
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid);
+
+/**
+ * @brief ecore_sp_vf_update - VF Function Update Ramrod
+ *
+ * This ramrod performs updates of a virtual function (VF).
+ * It currently contains no functionality.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_vf_stop - VF Function Stop Ramrod
+ *
+ * This ramrod is sent to unload a virtual function (VF).
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param concrete_vfid
+ * @param opaque_vfid
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid);
+
+/**
+ * @brief ecore_sp_eth_tx_queue_update -
+ *
+ * This ramrod updates a TX queue. It is used for setting the active
+ * state of the queue.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue; Should be used where contexts are handled
+ * outside of the ramrod area [specifically iov scenarios]
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param cid
+ * @param rx_queue_id
+ * @param vport_id
+ * @param stats_id
+ * @param sb
+ * @param sb_index
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ u16 rx_queue_id,
+ u8 vport_id,
+ u8 stats_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+/**
+ * @brief - Starts a Tx queue; Should be used where contexts are handled
+ * outside of the ramrod area [specifically iov scenarios]
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param tx_queue_id
+ * @param cid
+ * @param vport_id
+ * @param stats_id
+ * @param sb
+ * @param sb_index
+ * @param pbl_addr
+ * @param pbl_size
+ * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u16 tx_queue_id,
+ u32 cid,
+ u8 vport_id,
+ u8 stats_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union ecore_qm_pq_params *p_pq_params);
+
+u8 ecore_mcast_bin_from_mac(u8 *mac);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
new file mode 100644
index 00000000..b41dd7fe
--- /dev/null
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_L2_API_H__
+#define __ECORE_L2_API_H__
+
+#include "ecore_status.h"
+#include "ecore_sp_api.h"
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_rss_caps {
+ ECORE_RSS_IPV4 = 0x1,
+ ECORE_RSS_IPV6 = 0x2,
+ ECORE_RSS_IPV4_TCP = 0x4,
+ ECORE_RSS_IPV6_TCP = 0x8,
+ ECORE_RSS_IPV4_UDP = 0x10,
+ ECORE_RSS_IPV6_UDP = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define ECORE_RSS_IND_TABLE_SIZE 128
+#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
+#endif
+
+struct ecore_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+ u32 rss_key[ECORE_RSS_KEY_SIZE];
+};
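+
+/* Illustrative sketch (not part of this file): filling an RSS update.
+ * rss_table_size_log encodes the table size as a power of two, so 7
+ * selects all 128 indirection entries; RX queues are spread
+ * round-robin. The helper name is hypothetical; num_rxqs must be
+ * non-zero.
+ */
+#if 0 /* documentation-only example */
+static void example_fill_rss(struct ecore_rss_params *p_rss, u16 num_rxqs)
+{
+ u16 i;
+
+ p_rss->update_rss_config = 1;
+ p_rss->rss_enable = 1;
+ p_rss->update_rss_capabilities = 1;
+ p_rss->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP;
+ p_rss->update_rss_ind_table = 1;
+ p_rss->rss_table_size_log = 7; /* 2 ^ 7 == ECORE_RSS_IND_TABLE_SIZE */
+
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
+ p_rss->rss_ind_table[i] = i % num_rxqs;
+}
+#endif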
+
+struct ecore_sge_tpa_params {
+ u8 max_buffers_per_cqe;
+
+ u8 update_tpa_en_flg;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+
+ u8 update_tpa_param_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+};
+
+enum ecore_filter_opcode {
+ ECORE_FILTER_ADD,
+ ECORE_FILTER_REMOVE,
+ ECORE_FILTER_MOVE,
+ ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
+ ECORE_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum ecore_filter_ucast_type {
+ ECORE_FILTER_MAC,
+ ECORE_FILTER_VLAN,
+ ECORE_FILTER_MAC_VLAN,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_FILTER_VNI,
+};
+
+struct ecore_filter_ucast {
+ enum ecore_filter_opcode opcode;
+ enum ecore_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct ecore_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum ecore_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define ECORE_MAX_MC_ADDRS 64
+ unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct ecore_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define ECORE_ACCEPT_NONE 0x01
+#define ECORE_ACCEPT_UCAST_MATCHED 0x02
+#define ECORE_ACCEPT_UCAST_UNMATCHED 0x04
+#define ECORE_ACCEPT_MCAST_MATCHED 0x08
+#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
+#define ECORE_ACCEPT_BCAST 0x20
+};
+
+/* Add / remove / move / remove-all unicast MAC-VLAN filters.
+ * FW will assert in the following cases, so the driver must take care to avoid them:
+ * 1. Adding a filter to a full table.
+ * 2. Adding a filter which already exists on that vport.
+ * 3. Removing a filter which doesn't exist.
+ */
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
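+
+/* Illustrative sketch (not part of this file): adding a unicast MAC
+ * filter on vport 0 for both RX and TX, posted synchronously via
+ * EBLOCK mode. The helper name and vport id are hypothetical.
+ */
+#if 0 /* documentation-only example */
+static enum _ecore_status_t example_add_mac(struct ecore_dev *p_dev,
+ unsigned char *mac)
+{
+ struct ecore_filter_ucast ucast;
+
+ OSAL_MEMSET(&ucast, 0, sizeof(ucast));
+ ucast.opcode = ECORE_FILTER_ADD;
+ ucast.type = ECORE_FILTER_MAC;
+ ucast.is_rx_filter = 1;
+ ucast.is_tx_filter = 1;
+ ucast.vport_to_add_to = 0;
+ OSAL_MEMCPY(ucast.mac, mac, ETH_ALEN);
+
+ return ecore_filter_ucast_cmd(p_dev, &ucast, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+}
+#endif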
+
+/* Add / remove multicast MAC filters; MOVE is not supported. */
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+ struct ecore_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
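+
+/* Illustrative sketch (not part of this file): programming a multicast
+ * list. Since ADD is a full-set operation that replaces any existing
+ * bins for the vport, the whole list goes in one command. The helper
+ * name and vport id are hypothetical.
+ */
+#if 0 /* documentation-only example */
+static enum _ecore_status_t
+example_set_mcast_list(struct ecore_dev *p_dev,
+ unsigned char (*macs)[ETH_ALEN], u8 count)
+{
+ struct ecore_filter_mcast mcast;
+ u8 i;
+
+ if (count > ECORE_MAX_MC_ADDRS)
+ return ECORE_INVAL;
+
+ OSAL_MEMSET(&mcast, 0, sizeof(mcast));
+ mcast.opcode = ECORE_FILTER_ADD;
+ mcast.vport_to_add_to = 0;
+ mcast.num_mc_addrs = count;
+ for (i = 0; i < count; i++)
+ OSAL_MEMCPY(mcast.mac[i], macs[i], ETH_ALEN);
+
+ return ecore_filter_mcast_cmd(p_dev, &mcast, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+}
+#endif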
+
+/* Set "accept" filters */
+enum _ecore_status_t
+ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+ u8 vport,
+ struct ecore_filter_accept_flags accept_flags,
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
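+
+/* Illustrative sketch (not part of this file): accept flags for a
+ * promiscuous RX mode - matched and unmatched unicast/multicast plus
+ * broadcast. The helper name is hypothetical.
+ */
+#if 0 /* documentation-only example */
+static void example_fill_promisc(struct ecore_filter_accept_flags *p_flags)
+{
+ OSAL_MEMSET(p_flags, 0, sizeof(*p_flags));
+ p_flags->update_rx_mode_config = 1;
+ p_flags->rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED |
+ ECORE_ACCEPT_BCAST;
+}
+#endif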
+
+/**
+ * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ *
+ * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
+ * the VPort ID is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param rx_queue_id RX Queue ID: Zero based, per VPort, allocated
+ * by assignment (=rssId)
+ * @param vport_id VPort ID
+ * @param stats_id VPort ID which the queue stats
+ * will be added to
+ * @param sb Status Block of the Function Event Ring
+ * @param sb_index Index into the status block of the
+ * Function Event Ring
+ * @param bd_max_bytes Maximum bytes that can be placed on a BD
+ * @param bd_chain_phys_addr Physical address of BDs for receive.
+ * @param cqe_pbl_addr Physical address of the CQE PBL Table.
+ * @param cqe_pbl_size Size of the CQE PBL Table
+ * @param pp_prod Pointer to place producer's
+ * address for the Rx Q (May be
+ * NULL).
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u8 rx_queue_id,
+ u8 vport_id,
+ u8 stats_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod);
+
+/**
+ * @brief ecore_sp_eth_rx_queue_stop -
+ *
+ * This ramrod closes an RX queue. It sends RX queue stop ramrod
+ * + CFC delete ramrod
+ *
+ * @param p_hwfn
+ * @param rx_queue_id RX Queue ID
+ * @param eq_completion_only If True, completion is always on the
+ * EQe; if False, completion is on the
+ * EQe only when the p_hwfn opaque
+ * differs from the RXQ opaque,
+ * otherwise on the CQe.
+ * @param cqe_completion If True completion will be
+ * receive on CQe.
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion);
+
+/**
+ * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ *
+ * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
+ * the VPort is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param tx_queue_id TX Queue ID
+ * @param vport_id VPort ID
+ * @param stats_id VPort ID which the queue stats
+ * will be added to
+ * @param sb Status Block of the Function Event Ring
+ * @param sb_index Index into the status block of the Function
+ * Event Ring
+ * @param pbl_addr address of the pbl array
+ * @param pbl_size number of entries in pbl
+ * @param pp_doorbell Pointer to place doorbell pointer (May be NULL).
+ * This address should be used with the
+ * DIRECT_REG_WR macro.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u16 tx_queue_id,
+ u8 vport_id,
+ u8 stats_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void OSAL_IOMEM * *
+ pp_doorbell);
+
+/**
+ * @brief ecore_sp_eth_tx_queue_stop -
+ *
+ * This ramrod closes a TX queue. It sends TX queue stop ramrod
+ * + CFC delete ramrod
+ *
+ * @param p_hwfn
+ * @param tx_queue_id TX Queue ID
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ u16 tx_queue_id);
+
+enum ecore_tpa_mode {
+ ECORE_TPA_MODE_NONE,
+ ECORE_TPA_MODE_RSC,
+ ECORE_TPA_MODE_GRO,
+ ECORE_TPA_MODE_MAX
+};
+
+struct ecore_sp_vport_start_params {
+ enum ecore_tpa_mode tpa_mode;
+ bool remove_inner_vlan; /* Inner VLAN removal is enabled */
+ bool tx_switching; /* Vport supports tx-switching */
+ bool handle_ptp_pkts; /* Handle PTP packets */
+ bool only_untagged; /* Untagged pkt control */
+ bool drop_ttl0; /* Drop packets with TTL = 0 */
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id; /* VPORT ID */
+ u16 mtu; /* VPORT MTU */
+ bool zero_placement_offset;
+};
+
+/**
+ * @brief ecore_sp_vport_start -
+ *
+ * This ramrod initializes a VPort. An Assert is generated if the
+ * Function ID of the VPort is not enabled.
+ *
+ * @param p_hwfn
+ * @param p_params VPORT start params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_start_params *p_params);
+
+struct ecore_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ unsigned long bins[8];
+ struct ecore_rss_params *rss_params;
+ struct ecore_filter_accept_flags accept_flags;
+ struct ecore_sge_tpa_params *sge_tpa_params;
+};
+
+/**
+ * @brief ecore_sp_vport_update -
+ *
+ * This ramrod updates the parameters of the VPort. Every field can be updated
+ * independently, according to flags.
+ *
+ * This ramrod is also used to set the VPort state to active after creation.
+ * An Assert is generated if the VPort does not contain an RX queue.
+ *
+ * @param p_hwfn
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
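+
+/* Illustrative sketch (not part of this file): activating a vport's RX
+ * and TX paths right after creation, as noted above. Only fields whose
+ * update_* flag is set are applied. The helper name is hypothetical.
+ */
+#if 0 /* documentation-only example */
+static enum _ecore_status_t
+example_activate_vport(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ u8 vport_id)
+{
+ struct ecore_sp_vport_update_params params;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = opaque_fid;
+ params.vport_id = vport_id;
+ params.update_vport_active_rx_flg = 1;
+ params.vport_active_rx_flg = 1;
+ params.update_vport_active_tx_flg = 1;
+ params.vport_active_tx_flg = 1;
+
+ return ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+}
+#endif
+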
+/**
+ * @brief ecore_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u8 vport_id);
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_eth_rx_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id RX Queue ID
+ * @param num_rxqs Allows updating multiple RX
+ * queues, from rx_queue_id to
+ * (rx_queue_id + num_rxqs)
+ * @param complete_cqe_flg Post completion to the CQE Ring if set
+ * @param complete_event_flg Post completion to the Event Ring if set
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_eth_stats *stats,
+ u16 statistics_bin, bool b_get_port_stats);
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+ struct ecore_eth_stats *stats);
+
+void ecore_reset_vport_stats(struct ecore_dev *p_dev);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
new file mode 100644
index 00000000..9dd2eed3
--- /dev/null
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -0,0 +1,1932 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sriov.h"
+#include "ecore_iov_api.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_dcbx.h"
+
+#define CHIP_MCP_RESP_ITER_US 10
+#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
+
+#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+ ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+ ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+ OFFSETOF(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+ OFFSETOF(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+ DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+#ifndef ASIC_ONLY
+static int loaded;
+static int loaded_port[MAX_NUM_PORTS] = { 0 };
+#endif
+
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+ return false;
+ return true;
+}
+
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PORT);
+ u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+
+ p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+ MFW_PORT(p_hwfn));
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "port_addr = 0x%x, port_id 0x%02x\n",
+ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+ OSAL_BE32 tmp;
+ u32 i;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
+ return;
+#endif
+
+ if (!p_hwfn->mcp_info->public_base)
+ return;
+
+ for (i = 0; i < length; i++) {
+ tmp = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->mfw_mb_addr +
+ (i << 2) + sizeof(u32));
+
+ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+ OSAL_BE32_TO_CPU(tmp);
+ }
+}
+
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->mcp_info) {
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
+ }
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ p_hwfn->mcp_info = OSAL_NULL;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
+ p_info->public_base = 0;
+ return ECORE_INVAL;
+ }
+#endif
+
+ p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base)
+ return ECORE_INVAL;
+
+ p_info->public_base |= GRCBASE_MCP;
+
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_DRV_MB));
+ p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+ /* Set the MFW MB address */
+ mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr);
+
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
+ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Get current FW pulse sequence */
+ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK;
+
+ p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *p_info;
+ u32 size;
+
+ /* Allocate mcp_info structure */
+ p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_hwfn->mcp_info));
+ if (!p_hwfn->mcp_info)
+ goto err;
+ p_info = p_hwfn->mcp_info;
+
+ if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
+ /* Do not free mcp_info here, since its public_base indicates
+ * that the MCP is not initialized
+ */
+ return ECORE_SUCCESS;
+ }
+
+ size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+ p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+ goto err;
+
+ /* Initialize the MFW spinlock */
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
+ OSAL_SPIN_LOCK_INIT(&p_info->lock);
+
+ return ECORE_SUCCESS;
+
+err:
+ DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+ ecore_mcp_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 org_mcp_reset_seq, cnt = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+#endif
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+
+ /* Set drv command along with the updated sequence */
+ org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
+
+ do {
+ /* Wait for MFW response */
+ OSAL_UDELAY(delay);
+ /* Give the FW up to 500 msec (50*1000*10usec) */
+ } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0)) &&
+ (cnt++ < ECORE_MCP_RESET_RETRIES));
+
+ if (org_mcp_reset_seq !=
+ ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MCP was reset after %d usec\n", cnt * delay);
+ } else {
+ DP_ERR(p_hwfn, "Failed to reset MCP\n");
+ rc = ECORE_AGAIN;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+
+ return rc;
+}
+
+/* Should be called while the dedicated spinlock is acquired */
+static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd, u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 seq, cnt = 1, actual_mb_seq;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+#endif
+
+ /* Get actual driver mailbox sequence */
+ actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Use MCP history register to check if MCP reset occurred between
+ * init time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist !=
+ ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
+ ecore_load_mcp_offsets(p_hwfn, p_ptt);
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+ /* Set drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+ /* Set drv command along with the updated sequence */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "wrote command (%x) to MFW MB param 0x%08x\n",
+ (cmd | seq), param);
+
+ do {
+ /* Wait for MFW response */
+ OSAL_UDELAY(delay);
+ *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+ /* Give the FW up to 5 sec (500*1000*10usec) */
+ } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+ (cnt++ < ECORE_DRV_MB_MAX_RETRIES));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+ cnt * delay, *o_mcp_resp, seq);
+
+ /* Is this a reply to our command? */
+ if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+ *o_mcp_resp &= FW_MSG_CODE_MASK;
+ /* Get the MCP param */
+ *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+ } else {
+ /* FW BUG! */
+ DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+ cmd, param);
+ *o_mcp_resp = 0;
+ rc = ECORE_AGAIN;
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+ }
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+ u32 *o_mcp_resp, u32 *o_mcp_param)
+{
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+ loaded--;
+ loaded_port[p_hwfn->port_id]--;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
+ loaded);
+ }
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, OSAL_NULL,
+ o_mcp_resp, o_mcp_param);
+}
+
+enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd, u32 param,
+ union drv_union_data *p_union_data,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ u32 union_data_addr;
+ enum _ecore_status_t rc;
+
+ /* MCP not initialized */
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ return ECORE_BUSY;
+ }
+
+ /* Acquiring a spinlock is needed to ensure that only a single thread
+ * is accessing the mailbox at a certain time.
+ */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+
+ if (p_union_data != OSAL_NULL) {
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data,
+ sizeof(*p_union_data));
+ }
+
+ rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
+ o_mcp_param);
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+
+ return rc;
+}
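+
+/* Illustrative sketch (not part of the driver): the shape of a mailbox
+ * exchange. The command and the driver's sequence number share one
+ * header dword, and the MFW echoes the sequence in its response so
+ * ecore_do_mcp_cmd() can match replies to requests. The command used
+ * here is only to show the call shape.
+ */
+#if 0 /* documentation-only example */
+static enum _ecore_status_t example_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_RESET, 0,
+ &resp, &param);
+}
+#endif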
+
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 i_txn_size, u32 *i_buf)
+{
+ union drv_union_data union_data;
+
+ OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
+
+ return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, &union_data,
+ o_mcp_resp, o_mcp_param);
+}
+
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf)
+{
+ enum _ecore_status_t rc;
+ u32 i;
+
+ /* MCP not initialized */
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ return ECORE_BUSY;
+ }
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+ rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
+ o_mcp_param);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ /* Get payload after operation completes successfully */
+ *o_txn_size = *o_mcp_param;
+ for (i = 0; i < *o_txn_size; i += 4)
+ o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
+ union_data.raw_data[i]);
+
+out:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ return rc;
+}
+
+#ifndef ASIC_ONLY
+static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
+ u32 *p_load_code)
+{
+ static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+ if (!loaded)
+ load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+ else if (!loaded_port[p_hwfn->port_id])
+ load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+
+ /* On CMT, always report the ENGINE load phase */
+ if (p_hwfn->p_dev->num_hwfns > 1)
+ load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+ *p_load_code = load_phase;
+ loaded++;
+ loaded_port[p_hwfn->port_id]++;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
+ *p_load_code, loaded, p_hwfn->port_id,
+ loaded_port[p_hwfn->port_id]);
+}
+#endif
+
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_load_code)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ union drv_union_data union_data;
+ u32 param;
+ enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ ecore_mcp_mf_workaround(p_hwfn, p_load_code);
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+ (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ p_dev->drv_type),
+ &union_data, p_load_code, &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* If MFW refused the load request we must abort. This can
+ * happen in the following cases:
+ * - Other port is in diagnostic mode
+ * - Previously loaded function on the engine is not compliant with
+ * the requester.
+ * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+ */
+ if (!(*p_load_code) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+ ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+ DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ return ECORE_BUSY;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+ ECORE_PATH_ID(p_hwfn));
+ u32 disabled_vfs[VF_MAX_STATIC / 32];
+ int i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Reading Disabled VF information from [offset %08x],"
+ " path_addr %08x\n",
+ mfw_path_offsize, path_addr);
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
+ path_addr +
+ OFFSETOF(struct public_path,
+ mcp_vf_disabled) +
+ sizeof(u32) * i);
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+ "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+ }
+
+ if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ OSAL_VF_FLR_UPDATE(p_hwfn);
+}
+
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *vfs_to_ack)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+ MCP_PF_ID(p_hwfn));
+ union drv_union_data union_data;
+ u32 resp, param;
+ enum _ecore_status_t rc;
+ int i;
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+ "Acking VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+ OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
+ DRV_MSG_CODE_VF_DISABLED_DONE, 0,
+ &union_data, &resp, &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+ "Failed to pass ACK for VF flr to MFW\n");
+ return ECORE_TIMEOUT;
+ }
+
+ /* TMP - clear the ACK bits; should be done by MFW */
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ ecore_wr(p_hwfn, p_ptt,
+ func_addr +
+ OFFSETOF(struct public_func, drv_ack_vf_disabled) +
+ i * sizeof(u32), 0);
+
+ return rc;
+}
+
+static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 transceiver_state;
+
+ transceiver_state = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ transceiver_data));
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
+ "Received transceiver state update [0x%08x] from mfw"
+ "[Addr 0x%x]\n",
+ transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ transceiver_data)));
+
+ transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);
+
+ if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+ DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
+ else
+ DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+}
+
+static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool b_reset)
+{
+ struct ecore_mcp_link_state *p_link;
+ u32 status = 0;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+ OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+ if (!b_reset) {
+ status = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, link_status));
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
+ "Received link update [0x%08x] from mfw"
+ " [Addr 0x%x]\n",
+ status, (u32)(p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ link_status)));
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Resetting link indications\n");
+ return;
+ }
+
+ if (p_hwfn->b_drv_link_init)
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ else
+ p_link->link_up = false;
+
+ p_link->full_duplex = true;
+ switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+ p_link->speed = 100000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+ p_link->speed = 50000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+ p_link->speed = 40000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+ p_link->speed = 25000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+ p_link->speed = 20000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+ p_link->speed = 10000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+ p_link->full_duplex = false;
+ /* Fall-through */
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+ p_link->speed = 1000;
+ break;
+ default:
+ p_link->speed = 0;
+ }
+
+ /* We never store the total line speed in p_link->speed, as the
+ * latter may change again according to bandwidth allocation.
+ */
+ if (p_link->link_up && p_link->speed)
+ p_link->line_speed = p_link->speed;
+ else
+ p_link->line_speed = 0;
+
+ /* Correct speed according to bandwidth allocation */
+ if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+ u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+
+ __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+ }
+
+ if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
+ u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+ __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+
+ ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+ p_link->min_pf_rate);
+ }
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+ p_link->parallel_detection = !!(status &
+ LINK_STATUS_PARALLEL_DETECTION_USED);
+ p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_10G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_20G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_25G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_40G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_50G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+ ECORE_LINK_PARTNER_SPEED_100G : 0;
+
+ p_link->partner_tx_flow_ctrl_en =
+ !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+ p_link->partner_rx_flow_ctrl_en =
+ !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+ switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+ case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+ p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
+ break;
+ default:
+ p_link->partner_adv_pause = 0;
+ }
+
+ p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+ if (p_link->link_up)
+ ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);
+
+ OSAL_LINK_UPDATE(p_hwfn);
+}
+
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool b_up)
+{
+ struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ union drv_union_data union_data;
+ struct pmm_phy_cfg *p_phy_cfg;
+ u32 param = 0, reply = 0, cmd;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+#endif
+
+ /* Set the shmem configuration according to params */
+ p_phy_cfg = &union_data.drv_phy_cfg;
+ OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
+ cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+ if (!params->speed.autoneg)
+ p_phy_cfg->speed = params->speed.forced_speed;
+ p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+ p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+ p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ p_phy_cfg->adv_speed = params->speed.advertised_speeds;
+ p_phy_cfg->loopback_mode = params->loopback_mode;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn,
+ "Link on FPGA - Ask for loopback mode '5' at 10G\n");
+ p_phy_cfg->loopback_mode = 5;
+ p_phy_cfg->speed = 10000;
+ }
+#endif
+
+ p_hwfn->b_drv_link_init = b_up;
+
+ if (b_up)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
+ " adv_speed 0x%08x, loopback 0x%08x,"
+ " features 0x%08x\n",
+ p_phy_cfg->speed, p_phy_cfg->pause,
+ p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
+ p_phy_cfg->feature_config_flags);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, 0, &union_data, &reply,
+ &param);
+
+ /* if mcp fails to respond we must abort */
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* Reset the link status if needed */
+ if (!b_up)
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+ return rc;
+}
+
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
+ path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
+
+ proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
+ path_addr +
+ OFFSETOF(struct public_path, process_kill)) &
+ PROCESS_KILL_COUNTER_MASK;
+
+ return proc_kill_cnt;
+}
+
+static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u32 proc_kill_cnt;
+
+	/* Prevent possible attentions/interrupts during the recovery handling
+	 * and until its load phase, during which they will be re-enabled.
+	 */
+ ecore_int_igu_disable_int(p_hwfn, p_ptt);
+
+ DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
+
+ /* The following operations should be done once, and thus in CMT mode
+ * are carried out by only the first HW function.
+ */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
+ return;
+
+ if (p_dev->recov_in_prog) {
+ DP_NOTICE(p_hwfn, false,
+ "Ignoring the indication since a recovery"
+ " process is already in progress\n");
+ return;
+ }
+
+ p_dev->recov_in_prog = true;
+
+ proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
+ DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
+
+ OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
+}
+
+static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum MFW_DRV_MSG_TYPE type)
+{
+ enum ecore_mcp_protocol_type stats_type;
+ union ecore_mcp_protocol_stats stats;
+ u32 hsi_param, param = 0, reply = 0;
+ union drv_union_data union_data;
+
+ switch (type) {
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ stats_type = ECORE_MCP_LAN_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
+ return;
+ }
+
+ OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
+
+ OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
+
+ ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
+ hsi_param, &union_data, &reply, &param);
+}
+
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct public_func *p_data, int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+ size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
+}
+
+static void
+ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct ecore_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+	/* TODO - bandwidth min/max should have valid values of 1-100,
+	 * as well as some indication that the feature is disabled.
+	 * Until MFW/qlediag enforce those limitations, assume a limit is
+	 * always present and clamp the value to the [1, 100] range if it
+	 * is out of bounds.
+	 */
+ p_info->bandwidth_min = (p_shmem_info->config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = (p_shmem_info->config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
+static void
+ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_function_info *p_info;
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+
+ ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
+
+ ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
+
+ /* Acknowledge the MFW */
+ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
+}
+
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ /* A single notification should be sent to upper driver in CMT mode */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
+ return;
+
+ DP_NOTICE(p_hwfn, false,
+ "Fan failure was detected on the network interface card"
+ " and it's going to be shut down.\n");
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
+}
+
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_info *info = p_hwfn->mcp_info;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
+
+ /* Read Messages from MFW */
+ ecore_mcp_read_mb(p_hwfn, p_ptt);
+
+ /* Compare current messages to old ones */
+ for (i = 0; i < info->mfw_mb_length; i++) {
+ if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+ continue;
+
+ found = true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+ i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+ switch (i) {
+ case MFW_DRV_MSG_LINK_CHANGE:
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
+ break;
+ case MFW_DRV_MSG_VF_DISABLED:
+ ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_REMOTE_LLDP_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_REMOTE_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ break;
+ case MFW_DRV_MSG_ERROR_RECOVERY:
+ ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+ break;
+ case MFW_DRV_MSG_BW_UPDATE:
+ ecore_mcp_update_bw(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+ ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ break;
+ default:
+ /* @DPDK */
+ DP_NOTICE(p_hwfn, false,
+ "Unimplemented MFW message %d\n", i);
+ rc = ECORE_INVAL;
+ }
+ }
+
+ /* ACK everything */
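+	/* The mailbox (as addressed here) holds a length dword, then the
+	 * current-message dwords, then a parallel array of ack dwords -
+	 * hence the 'sizeof(u32) + MAX_DWORDS * sizeof(u32)' base offset
+	 * used in the write below.
+	 */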
+ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+ OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
+
+		/* The MFW expects the answer in BE, so force the write in that format */
+ ecore_wr(p_hwfn, p_ptt,
+ info->mfw_mb_addr + sizeof(u32) +
+ MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+ sizeof(u32) + i * sizeof(u32), val);
+ }
+
+ if (!found) {
+ DP_NOTICE(p_hwfn, false,
+ "Received an MFW message indication but no"
+ " new message!\n");
+ rc = ECORE_INVAL;
+ }
+
+ /* Copy the new mfw messages into the shadow */
+ OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
+ u32 *p_mfw_ver,
+ u32 *p_running_bundle_id)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 global_offsize;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
+ return ECORE_SUCCESS;
+ }
+#endif
+
+ if (IS_VF(p_dev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return ECORE_SUCCESS;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV,
+			   "VF requested MFW version prior to ACQUIRE\n");
+ return ECORE_INVAL;
+ }
+
+ global_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+ public_base,
+ PUBLIC_GLOBAL));
+ *p_mfw_ver =
+ ecore_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) + OFFSETOF(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id != OSAL_NULL) {
+ *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) +
+ OFFSETOF(struct public_global,
+ running_bundle_id));
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+ u32 *p_media_type)
+{
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
+ struct ecore_ptt *p_ptt;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+
+ *p_media_type = MEDIA_UNSPECIFIED;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, media_type));
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_info,
+ enum ecore_pci_personality *p_proto)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+ case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+ *p_proto = ECORE_PCI_ETH;
+ break;
+ default:
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_function_info *info;
+ struct public_func shmem_info;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+ info = &p_hwfn->mcp_info->func_info;
+
+ info->pause_on_host = (shmem_info.config &
+ FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
+ DP_ERR(p_hwfn, "Unknown personality %08x\n",
+ (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+ return ECORE_INVAL;
+ }
+
+ ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ if (shmem_info.mac_upper || shmem_info.mac_lower) {
+ info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+ info->mac[1] = (u8)(shmem_info.mac_upper);
+ info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+ info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+ info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+ info->mac[5] = (u8)(shmem_info.mac_lower);
+ } else {
+ /* TODO - are there protocols for which there's no MAC? */
+ DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
+ }
+
+ info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
+ "Read configuration from shmem: pause_on_host %02x"
+ " protocol %02x BW [%02x - %02x]"
+ " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %" PRIx64
+ " node %" PRIx64 " ovlan %04x\n",
+ info->pause_on_host, info->protocol,
+ info->bandwidth_min, info->bandwidth_max,
+ info->mac[0], info->mac[1], info->mac[2],
+ info->mac[3], info->mac[4], info->mac[5],
+ info->wwn_port, info->wwn_node, info->ovlan);
+
+ return ECORE_SUCCESS;
+}
+
+struct ecore_mcp_link_params
+*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->link_input;
+}
+
+struct ecore_mcp_link_state
+*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
+ p_hwfn->mcp_info->link_output.link_up = true;
+ }
+#endif
+
+ return &p_hwfn->mcp_info->link_output;
+}
+
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->link_capabilities;
+}
+
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+
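+	/* The mailbox param (100) is presumably the drain period in msec;
+	 * the sleep below waits slightly longer than that.
+	 */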
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NIG_DRAIN, 100, &resp, &param);
+
+ /* Wait for the drain to complete before returning */
+ OSAL_MSLEEP(120);
+
+ return rc;
+}
+
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return OSAL_NULL;
+ return &p_hwfn->mcp_info->func_info;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_nvm_params *params)
+{
+ enum _ecore_status_t rc;
+
+ switch (params->type) {
+ case ECORE_MCP_NVM_RD:
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
+ params->nvm_common.offset,
+ &params->nvm_common.resp,
+ &params->nvm_common.param,
+ params->nvm_rd.buf_size,
+ params->nvm_rd.buf);
+ break;
+ case ECORE_MCP_CMD:
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
+ params->nvm_common.offset,
+ &params->nvm_common.resp,
+ &params->nvm_common.param);
+ break;
+ case ECORE_MCP_NVM_WR:
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
+ params->nvm_common.offset,
+ &params->nvm_common.resp,
+ &params->nvm_common.param,
+ params->nvm_wr.buf_size,
+ params->nvm_wr.buf);
+ break;
+ default:
+ rc = ECORE_NOTIMPL;
+ break;
+ }
+ return rc;
+}
+
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 personalities)
+{
+ enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
+ struct public_func shmem_info;
+ int i, count = 0, num_pfs;
+
+ num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+
+ for (i = 0; i < num_pfs; i++) {
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID_BY_REL(p_hwfn, i));
+ if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+ &protocol) != ECORE_SUCCESS)
+ continue;
+
+ if ((1 << ((u32)protocol)) & personalities)
+ count++;
+ }
+
+ return count;
+}
+
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_flash_size)
+{
+ u32 flash_size;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
+ return ECORE_INVAL;
+ }
+#endif
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
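+	/* NVM_CFG4 appears to encode the flash size as a power-of-two
+	 * number of Mbits; adding MCP_BYTES_PER_MBIT_SHIFT to the exponent
+	 * converts the result to bytes.
+	 */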
+ flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+ flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+ *p_flash_size = flash_size;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ if (p_dev->recov_in_prog) {
+ DP_NOTICE(p_hwfn, false,
+ "Avoid triggering a recovery since such a process"
+ " is already in progress\n");
+ return ECORE_AGAIN;
+ }
+
+ DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
+{
+ u32 resp = 0, param = 0, rc_param = 0;
+ enum _ecore_status_t rc;
+
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+ &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
+ vf_id);
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_version *p_ver)
+{
+ u32 param = 0, reply = 0, num_words, i;
+ struct drv_version_stc *p_drv_version;
+ union drv_union_data union_data;
+ void *p_name;
+ OSAL_BE32 val;
+ enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+#endif
+
+ p_drv_version = &union_data.drv_version;
+ p_drv_version->version = p_ver->version;
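+	/* Copy the driver name into the union a dword at a time, converting
+	 * each dword to big-endian for the MFW.
+	 */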
+ num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
+ for (i = 0; i < num_words; i++) {
+ p_name = &p_ver->name[i * sizeof(u32)];
+ val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
+ *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ }
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0,
+ &union_data, &reply, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 value, cpu_mode;
+
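+	/* Clear all pending CPU-state events, drop the soft-halt bit, and
+	 * read the mode back to verify the MCP actually left the halted
+	 * state.
+	 */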
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+ value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+ cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+
+ return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_config_method config,
+ enum ecore_ov_client client)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+
+	switch (client) {
+ case ECORE_OV_CLIENT_DRV:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+ break;
+ case ECORE_OV_CLIENT_USER:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+ break;
+ default:
+		DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_driver_state drv_state)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+
+ switch (drv_state) {
+ case ECORE_OV_DRIVER_STATE_NOT_LOADED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+ break;
+ case ECORE_OV_DRIVER_STATE_DISABLED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+ break;
+ case ECORE_OV_DRIVER_STATE_ACTIVE:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+			   drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_fc_npiv_tbl *p_table)
+{
+ return 0;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mtu)
+{
+ return 0;
+}
+
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_led_mode mode)
+{
+ u32 resp = 0, param = 0, drv_mb_param;
+ enum _ecore_status_t rc;
+
+ switch (mode) {
+ case ECORE_LED_MODE_ON:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
+ break;
+ case ECORE_LED_MODE_OFF:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
+ break;
+ case ECORE_LED_MODE_RESTORE:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 mask_parities)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+ mask_parities, &resp, &param);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "MCP response failure for mask parities, aborting\n");
+ } else if (resp != FW_MSG_CODE_OK) {
+ DP_ERR(p_hwfn,
+ "MCP did not ack mask parity request. Old MFW?\n");
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+ u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 bytes_left, offset, bytes_to_copy, buf_size;
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ bytes_left = len;
+ offset = 0;
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_rd.buf_size = &buf_size;
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
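+	/* Reads are issued in chunks of at most MCP_DRV_NVM_BUF_LEN bytes;
+	 * each request packs the NVM address in the low bits of 'offset'
+	 * and the chunk length above DRV_MB_PARAM_NVM_LEN_SHIFT.
+	 */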
+ while (bytes_left > 0) {
+ bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+ MCP_DRV_NVM_BUF_LEN);
+ params.nvm_common.offset = (addr + offset) |
+ (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
+ params.nvm_rd.buf = (u32 *)(p_buf + offset);
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
+ FW_MSG_CODE_NVM_OK)) {
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ break;
+ }
+ offset += *params.nvm_rd.buf_size;
+ bytes_left -= *params.nvm_rd.buf_size;
+ }
+
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_rd.buf_size = &len;
+ params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
+ DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
+ params.nvm_common.offset = addr;
+ params.nvm_rd.buf = (u32 *)p_buf;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_CMD;
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
+ params.nvm_common.offset = addr;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+ u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_CMD;
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+ params.nvm_common.offset = addr;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+/* rc is initialized to ECORE_INVAL since the loop below is never
+ * entered (and rc never assigned) when len is 0
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ u32 buf_idx, buf_size;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_NVM_WR;
+ if (cmd == ECORE_PUT_FILE_DATA)
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ else
+ params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ buf_idx = 0;
+ while (buf_idx < len) {
+ buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+ MCP_DRV_NVM_BUF_LEN);
+ params.nvm_common.offset = ((buf_size <<
+ DRV_MB_PARAM_NVM_LEN_SHIFT)
+ | addr) + buf_idx;
+ params.nvm_wr.buf_size = buf_size;
+ params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS ||
+ ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
+ (params.nvm_common.resp !=
+ FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+
+ buf_idx += buf_size;
+ }
+
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_NVM_WR;
+ params.nvm_wr.buf_size = len;
+ params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
+ DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
+ params.nvm_common.offset = addr;
+ params.nvm_wr.buf = (u32 *)p_buf;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+ u32 addr)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_mcp_nvm_params params;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_BUSY;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.type = ECORE_MCP_CMD;
+ params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
+ params.nvm_common.offset = addr;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf)
+{
+ struct ecore_mcp_nvm_params params;
+ enum _ecore_status_t rc;
+ u32 bytes_left, bytes_to_copy, buf_size;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
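+	/* Transceiver accesses pack port, I2C address, offset and size into
+	 * the single 'offset' mailbox parameter via SET_FIELD.
+	 */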
+ SET_FIELD(params.nvm_common.offset,
+ DRV_MB_PARAM_TRANSCEIVER_PORT, port);
+ SET_FIELD(params.nvm_common.offset,
+ DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
+ addr = offset;
+ offset = 0;
+ bytes_left = len;
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_rd.buf_size = &buf_size;
+ params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
+ while (bytes_left > 0) {
+ bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+ MAX_I2C_TRANSACTION_SIZE);
+ params.nvm_rd.buf = (u32 *)(p_buf + offset);
+ SET_FIELD(params.nvm_common.offset,
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
+ SET_FIELD(params.nvm_common.offset,
+ DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
+		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+			return ECORE_NODEV;
+		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+			return ECORE_UNKNOWN_ERROR;
+
+ offset += *params.nvm_rd.buf_size;
+ bytes_left -= *params.nvm_rd.buf_size;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf)
+{
+ struct ecore_mcp_nvm_params params;
+ enum _ecore_status_t rc;
+ u32 buf_idx, buf_size;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ SET_FIELD(params.nvm_common.offset,
+ DRV_MB_PARAM_TRANSCEIVER_PORT, port);
+ SET_FIELD(params.nvm_common.offset,
+ DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
+ params.type = ECORE_MCP_NVM_WR;
+ params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
+ buf_idx = 0;
+ while (buf_idx < len) {
+ buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+ MAX_I2C_TRANSACTION_SIZE);
+ SET_FIELD(params.nvm_common.offset,
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
+ SET_FIELD(params.nvm_common.offset,
+ DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
+ params.nvm_wr.buf_size = buf_size;
+ params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
+		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+		if (rc != ECORE_SUCCESS)
+			return rc;
+
+		if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+		    FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+			return ECORE_NODEV;
+		} else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+			   FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+			return ECORE_UNKNOWN_ERROR;
+
+ buf_idx += buf_size;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_val)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 drv_mb_param = 0, rsp;
+
+ SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
+
+	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
+			   drv_mb_param, &rsp, gpio_val);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u16 gpio_val)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 drv_mb_param = 0, param, rsp;
+
+ SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
+ SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);
+
+	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
+			   drv_mb_param, &rsp, &param);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
new file mode 100644
index 00000000..448c30bb
--- /dev/null
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_MCP_H__
+#define __ECORE_MCP_H__
+
+#include "bcm_osal.h"
+#include "mcp_public.h"
+#include "ecore_mcp_api.h"
+
+/* Using the hwfn number (and not pf_num) is required since in CMT mode
+ * the same pf_num may be used by two different hwfns.
+ * TODO - this shouldn't really be in a .h file, but until all fields
+ * required during hw-init are placed in their correct place in shmem
+ * we need it in ecore_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \
+ ((rel_pfid) | \
+ ((p_hwfn)->abs_pf_id & 1) << 3) : \
+ rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->p_dev->num_ports_in_engines * 2))
+struct ecore_mcp_info {
+ osal_spinlock_t lock; /* Spinlock used for accessing MCP mailbox */
+ u32 public_base; /* Address of the MCP public area */
+ u32 drv_mb_addr; /* Address of the driver mailbox */
+ u32 mfw_mb_addr; /* Address of the MFW mailbox */
+ u32 port_addr; /* Address of the port configuration (link) */
+ u16 drv_mb_seq; /* Current driver mailbox sequence */
+ u16 drv_pulse_seq; /* Current driver pulse sequence */
+ struct ecore_mcp_link_params link_input;
+ struct ecore_mcp_link_state link_output;
+ struct ecore_mcp_link_capabilities link_capabilities;
+ struct ecore_mcp_function_info func_info;
+
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u16 mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event is
+ * detected, it will be handled here; otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief When the MFW doesn't get a driver pulse for a couple of seconds,
+ * at some threshold before the timeout expires it generates an interrupt
+ * through a dedicated status block (DPSB - Driver Pulse Status Block),
+ * to which the driver should respond immediately by providing a keepalive
+ * indication after setting the PTT to the driver-MFW mailbox. This
+ * function is called directly from the DPC upon receiving the DPSB
+ * attention.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW and, in case the operation
+ * succeeds, returns whether this PF is the first on the
+ * chip/engine/port or just another function. This function should be
+ * called when the driver is ready to accept MFW events, after the
+ * Storm initializations are done.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_load_code - The MCP response param containing one
+ * of the following:
+ * FW_MSG_CODE_DRV_LOAD_ENGINE
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *vfs_to_ack);
+
+/**
+ * @brief - called during init to read all function-related info from shmem.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Sets the union data in the MCP mailbox and sends a mailbox command.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param cmd - command to be sent to the MCP
+ * @param param - optional param
+ * @param p_union_data - pointer to a drv_union_data
+ * @param o_mcp_resp - the MCP response code (exclude sequence)
+ * @param o_mcp_param - optional parameter provided by the MCP response
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - operation was successful
+ * ECORE_BUSY - operation failed
+ */
+enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd, u32 param,
+ union drv_union_data *p_union_data,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
+
+/**
+ * @brief - Sends an NVM write command request to the MFW with
+ * payload.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
+ * DRV_MSG_CODE_NVM_PUT_FILE_DATA
+ * @param param - bits [0:23] - offset, bits [24:31] - size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param i_txn_size - Buffer size
+ * @param i_buf - Pointer to the buffer
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 i_txn_size, u32 *i_buf);
+
+/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ * a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - bits [0:23] - offset, bits [24:31] - size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size - Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute inside engine
+ * @param num - number of SBs (MSI-X entries) to request
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num);
+
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 max_bw);
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link,
+ u8 min_bw);
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 mask_parities);
+#endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
new file mode 100644
index 00000000..7360b356
--- /dev/null
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -0,0 +1,611 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_MCP_API_H__
+#define __ECORE_MCP_API_H__
+
+#include "ecore_status.h"
+
+struct ecore_mcp_link_speed_params {
+ bool autoneg;
+ u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
+ u32 forced_speed; /* In Mb/s */
+};
+
+struct ecore_mcp_link_pause_params {
+ bool autoneg;
+ bool forced_rx;
+ bool forced_tx;
+};
+
+struct ecore_mcp_link_params {
+ struct ecore_mcp_link_speed_params speed;
+ struct ecore_mcp_link_pause_params pause;
+ u32 loopback_mode; /* in PMM_LOOPBACK values */
+};
+
+struct ecore_mcp_link_capabilities {
+ u32 speed_capabilities;
+};
+
+struct ecore_mcp_link_state {
+ bool link_up;
+
+ u32 line_speed; /* In Mb/s */
+ u32 min_pf_rate; /* In Mb/s */
+ u32 speed; /* In Mb/s */
+ bool full_duplex;
+
+ bool an;
+ bool an_complete;
+ bool parallel_detection;
+ bool pfc_enabled;
+
+#define ECORE_LINK_PARTNER_SPEED_1G_HD (1 << 0)
+#define ECORE_LINK_PARTNER_SPEED_1G_FD (1 << 1)
+#define ECORE_LINK_PARTNER_SPEED_10G (1 << 2)
+#define ECORE_LINK_PARTNER_SPEED_20G (1 << 3)
+#define ECORE_LINK_PARTNER_SPEED_25G (1 << 4)
+#define ECORE_LINK_PARTNER_SPEED_40G (1 << 5)
+#define ECORE_LINK_PARTNER_SPEED_50G (1 << 6)
+#define ECORE_LINK_PARTNER_SPEED_100G (1 << 7)
+ u32 partner_adv_speed;
+
+ bool partner_tx_flow_ctrl_en;
+ bool partner_rx_flow_ctrl_en;
+
+#define ECORE_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define ECORE_LINK_PARTNER_BOTH_PAUSE (3)
+ u8 partner_adv_pause;
+
+ bool sfp_tx_fault;
+};
+
+struct ecore_mcp_function_info {
+ u8 pause_on_host;
+
+ enum ecore_pci_personality protocol;
+
+ u8 bandwidth_min;
+ u8 bandwidth_max;
+
+ u8 mac[ETH_ALEN];
+
+ u64 wwn_port;
+ u64 wwn_node;
+
+#define ECORE_MCP_VLAN_UNSET (0xffff)
+ u16 ovlan;
+};
+
+struct ecore_mcp_nvm_common {
+ u32 offset;
+ u32 param;
+ u32 resp;
+ u32 cmd;
+};
+
+struct ecore_mcp_nvm_rd {
+ u32 *buf_size;
+ u32 *buf;
+};
+
+struct ecore_mcp_nvm_wr {
+ u32 buf_size;
+ u32 *buf;
+};
+
+struct ecore_mcp_nvm_params {
+#define ECORE_MCP_CMD (1 << 0)
+#define ECORE_MCP_NVM_RD (1 << 1)
+#define ECORE_MCP_NVM_WR (1 << 2)
+ u8 type;
+
+ struct ecore_mcp_nvm_common nvm_common;
+
+ union {
+ struct ecore_mcp_nvm_rd nvm_rd;
+ struct ecore_mcp_nvm_wr nvm_wr;
+ };
+};
+
+struct ecore_mcp_drv_version {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct ecore_mcp_lan_stats {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+};
+
+#ifndef ECORE_PROTO_STATS
+#define ECORE_PROTO_STATS
+
+enum ecore_mcp_protocol_type {
+ ECORE_MCP_LAN_STATS,
+};
+
+union ecore_mcp_protocol_stats {
+ struct ecore_mcp_lan_stats lan_stats;
+};
+#endif
+
+enum ecore_ov_config_method {
+ ECORE_OV_CONFIG_MTU,
+ ECORE_OV_CONFIG_MAC,
+ ECORE_OV_CONFIG_WOL
+};
+
+enum ecore_ov_client {
+ ECORE_OV_CLIENT_DRV,
+ ECORE_OV_CLIENT_USER
+};
+
+enum ecore_ov_driver_state {
+ ECORE_OV_DRIVER_STATE_NOT_LOADED,
+ ECORE_OV_DRIVER_STATE_DISABLED,
+ ECORE_OV_DRIVER_STATE_ACTIVE
+};
+
+#define ECORE_MAX_NPIV_ENTRIES 128
+#define ECORE_WWN_SIZE 8
+struct ecore_fc_npiv_tbl {
+ u32 count;
+ u8 wwpn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+ u8 wwnn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_led_mode {
+ ECORE_LED_MODE_OFF,
+ ECORE_LED_MODE_ON,
+ ECORE_LED_MODE_RESTORE
+};
+#endif
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, bool b_up);
+
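+/* Illustrative (non-normative) flow - a caller typically fetches the input
+ * link params, adjusts them, and then applies them; ecore_mcp_set_link()
+ * reads its configuration from the same 'link_input' the getter exposes:
+ *
+ *	struct ecore_mcp_link_params *p_params =
+ *		ecore_mcp_get_link_params(p_hwfn);
+ *
+ *	if (p_params != OSAL_NULL) {
+ *		p_params->speed.autoneg = true;
+ *		ecore_mcp_set_link(p_hwfn, p_ptt, true);
+ *	}
+ */
+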
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param p_dev - ecore dev pointer
+ * @param p_ptt
+ * @param p_mfw_ver - mfw version value
+ * @param p_running_bundle_id - image id in nvram; Optional.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
+ u32 *p_mfw_ver,
+ u32 *p_running_bundle_id);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param media_type - filled with the port's media type value
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ * ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+ u32 *media_type);
+
+/**
+ * @brief - Sends a command to the MCP mailbox.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param cmd - command to be sent to the MCP
+ * @param param - optional param
+ * @param o_mcp_resp - the MCP response code (exclude sequence)
+ * @param o_mcp_param - optional parameter provided by the MCP response
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - operation was successful
+ * ECORE_BUSY - operation failed
+ */
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+ u32 *o_mcp_resp, u32 *o_mcp_param);
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ * (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - return the mcp function info of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to mcp function info
+ */
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief - Function for reading/manipulating the nvram. The following
+ * functionalities are supported:
+ * 1. Read: Read the specified nvram offset.
+ * input values:
+ * type - ECORE_MCP_NVM_RD
+ * cmd - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM)
+ * offset - nvm offset
+ *
+ * output values:
+ * buf - buffer
+ * buf_size - buffer size
+ *
+ * 2. Write: Write the data at the specified nvram offset
+ * input values:
+ * type - ECORE_MCP_NVM_WR
+ * cmd - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM)
+ * offset - nvm offset
+ * buf - buffer
+ * buf_size - buffer size
+ *
+ * 3. Command: Send the NVM command to MCP.
+ * input values:
+ * type - ECORE_MCP_CMD
+ * cmd - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE)
+ * offset - nvm offset
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param params
+ *
+ * @return ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_nvm_params *params);
+
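+/* Illustrative (non-normative) example, mirroring ecore_mcp_nvm_read() - a
+ * single bounded read from NVM, where 'nvm_offset' is a caller-chosen
+ * address:
+ *
+ *	struct ecore_mcp_nvm_params params;
+ *	u32 buf[MCP_DRV_NVM_BUF_LEN / sizeof(u32)], buf_size;
+ *
+ *	OSAL_MEMSET(&params, 0, sizeof(params));
+ *	params.type = ECORE_MCP_NVM_RD;
+ *	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
+ *	params.nvm_common.offset = nvm_offset |
+ *			(sizeof(buf) << DRV_MB_PARAM_NVM_LEN_SHIFT);
+ *	params.nvm_rd.buf = buf;
+ *	params.nvm_rd.buf_size = &buf_size;
+ *	rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ */
+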
+/**
+ * @brief - count the number of functions with a matching personality on the engine.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param personalities - a bitmask of ecore_pci_personality values
+ *
+ * @returns the count of all functions on the engine whose personality
+ * matches one of the given bitmask values.
+ */
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 personalities);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size - flash size in bytes to be filled.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_version *p_ver);
+
+/**
+ * @brief Read the MFW process kill counter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Trigger a recovery process
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Notify MFW about the change in base device properties
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param config - Configuration that has been updated
+ * @param client - ecore client type
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_config_method config,
+ enum ecore_ov_client client);
+
+/**
+ * @brief Notify MFW about the driver state
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param drv_state - Driver state
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_driver_state drv_state);
+
+/**
+ * @brief Read NPIV settings from the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_table - Array to hold the FC NPIV data. The caller needs to
+ * allocate the required buffer. The field 'count' specifies the
+ * number of NPIV entries; a value of 0 means the table was not populated.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_fc_npiv_tbl *p_table);
+
+/**
+ * @brief Send MTU size to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mtu - MTU size
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mtu);
+
+/**
+ * @brief Set LED status
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mode - LED mode
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_led_mode mode);
+
+/**
+ * @brief Set secure mode
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+ u32 addr);
+
+/**
+ * @brief Write to phy
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - nvm write buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Write to nvm
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - nvm write buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Put file begin
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+ u32 addr);
+
+/**
+ * @brief Delete file
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr);
+
+/**
+ * @brief Check latest response
+ *
+ * @param p_dev
+ * @param p_buf - buffer to receive the last NVM response
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
+
+/**
+ * @brief Read from phy
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param cmd - nvm command
+ * @param p_buf - nvm read buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+ u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from nvm
+ *
+ * @param p_dev
+ * @param addr - nvm offset
+ * @param p_buf - nvm read buffer
+ * @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+ u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to read into
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf);
+
+/**
+ * @brief Write to sfp
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param port - transceiver port
+ * @param addr - I2C address
+ * @param offset - offset in sfp
+ * @param len - buffer length
+ * @param p_buf - buffer to write from
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset,
+ u32 len, u8 *p_buf);
+
+/**
+ * @brief Gpio read
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_val - value read from gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_val);
+
+/**
+ * @brief Gpio write
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_val - value to write to gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u16 gpio_val);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
new file mode 100644
index 00000000..2fecbc86
--- /dev/null
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_PROTO_IF_H__
+#define __ECORE_PROTO_IF_H__
+
+/*
+ * PF parameters (according to personality/protocol)
+ */
+
+struct ecore_eth_pf_params {
+ /* The following parameters are used during HW-init
+ * and these parameters need to be passed as arguments
+ * to update_pf_params routine invoked before slowpath start
+ */
+ u16 num_cons;
+};
+
+struct ecore_pf_params {
+ struct ecore_eth_pf_params eth_pf_params;
+};
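+
+/*
+ * Usage sketch (illustrative; the connection count is a hypothetical
+ * example, and the filled struct is expected to be handed to the caller's
+ * update_pf_params routine before slowpath start, as noted above):
+ *
+ *   struct ecore_pf_params pf_params;
+ *
+ *   OSAL_MEMSET(&pf_params, 0, sizeof(pf_params));
+ *   pf_params.eth_pf_params.num_cons = 64;
+ */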
+
+#endif
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
new file mode 100644
index 00000000..1f5139ea
--- /dev/null
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __RT_DEFS_H__
+#define __RT_DEFS_H__
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30799
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30815
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30849
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31523
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32547
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33059
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33848
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33849
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33850
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33851
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33860
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33861
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33862
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33863
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33864
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33866
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33869
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33872
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33875
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33878
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33881
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33884
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33887
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33890
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33893
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33896
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33899
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33902
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33905
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33908
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33911
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33914
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33917
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33920
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33922
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33923
+
+#define RUNTIME_ARRAY_SIZE 33924
+
+#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
new file mode 100644
index 00000000..e80f5ef3
--- /dev/null
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SP_API_H__
+#define __ECORE_SP_API_H__
+
+#include "ecore_status.h"
+
+enum spq_mode {
+ ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ ECORE_SPQ_MODE_CB, /* Client supplies a callback */
+ ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */
+};
+
+struct ecore_hwfn;
+union event_ring_data;
+struct eth_slow_path_rx_cqe;
+
+struct ecore_spq_comp_cb {
+ void (*function)(struct ecore_hwfn *,
+ void *, union event_ring_data *, u8 fw_return_code);
+ void *cookie;
+};
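+
+/*
+ * Usage sketch for ECORE_SPQ_MODE_CB (illustrative; my_comp_cb and my_ctx
+ * are hypothetical client-side names, not part of this API):
+ *
+ *   static void my_comp_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+ *                          union event_ring_data *data, u8 fw_return_code)
+ *   {
+ *           handle the completion here, using cookie to find the request
+ *   }
+ *
+ *   struct ecore_spq_comp_cb cb = { .function = my_comp_cb,
+ *                                   .cookie = my_ctx };
+ */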
+
+/**
+ * @brief ecore_eth_cqe_completion - handles the completion of a
+ * ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
new file mode 100644
index 00000000..e9ac8988
--- /dev/null
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_dcbx.h"
+
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent,
+ u8 cmd,
+ u8 protocol,
+ struct ecore_sp_init_data *p_data)
+{
+ u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get an SPQ entry */
+ rc = ecore_spq_get_entry(p_hwfn, pp_ent);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Fill the SPQ entry */
+ p_ent = *pp_ent;
+ p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
+ p_ent->elem.hdr.cmd_id = cmd;
+ p_ent->elem.hdr.protocol_id = protocol;
+ p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
+ p_ent->comp_mode = p_data->comp_mode;
+ p_ent->comp_done.done = 0;
+
+ switch (p_ent->comp_mode) {
+ case ECORE_SPQ_MODE_EBLOCK:
+ p_ent->comp_cb.cookie = &p_ent->comp_done;
+ break;
+
+ case ECORE_SPQ_MODE_BLOCK:
+ if (!p_data->p_comp_data)
+ return ECORE_INVAL;
+
+ p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
+ break;
+
+ case ECORE_SPQ_MODE_CB:
+ if (!p_data->p_comp_data)
+ p_ent->comp_cb.function = OSAL_NULL;
+ else
+ p_ent->comp_cb = *p_data->p_comp_data;
+ break;
+
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+ opaque_cid, cmd, protocol,
+ (unsigned long)&p_ent->ramrod,
+ D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+ ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
+
+ return ECORE_SUCCESS;
+}
+
+static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+{
+ switch (type) {
+ case ECORE_TUNN_CLSS_MAC_VLAN:
+ return TUNNEL_CLSS_MAC_VLAN;
+ case ECORE_TUNN_CLSS_MAC_VNI:
+ return TUNNEL_CLSS_MAC_VNI;
+ case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
+ return TUNNEL_CLSS_INNER_MAC_VLAN;
+ case ECORE_TUNN_CLSS_INNER_MAC_VNI:
+ return TUNNEL_CLSS_INNER_MAC_VNI;
+ default:
+ return TUNNEL_CLSS_MAC_VLAN;
+ }
+}
+
+static void
+ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
+ unsigned long update_mask = p_src->tunn_mode_update_mask;
+ unsigned long tunn_mode = p_src->tunn_mode;
+ unsigned long new_tunn_mode = 0;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
+ } else {
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
+ } else {
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
+ if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
+ } else {
+ if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
+ }
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ if (p_src->update_geneve_udp_port)
+ DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
+ p_src->update_geneve_udp_port = 0;
+ p_src->tunn_mode = new_tunn_mode;
+ return;
+ }
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+ OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ }
+
+ p_src->tunn_mode = new_tunn_mode;
+}
+
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode = p_src->tunn_mode;
+ enum tunnel_clss type;
+
+ ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+ p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+ p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port =
+ OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ if (p_src->update_geneve_udp_port)
+ DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
+ p_src->update_geneve_udp_port = 0;
+ return;
+ }
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ unsigned long tunn_mode)
+{
+ u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+ u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+ l2gre_enable = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+ ipgre_enable = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+ vxlan_enable = 1;
+
+ ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+ ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+ return;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+ l2geneve_enable = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+ ipgeneve_enable = 1;
+
+ ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+ ipgeneve_enable);
+}
+
+static void
+ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunn_start_params *p_src,
+ struct pf_start_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode;
+ enum tunnel_clss type;
+
+ if (!p_src)
+ return;
+
+ tunn_mode = p_src->tunn_mode;
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port =
+ OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ if (p_src->update_geneve_udp_port)
+ DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
+ p_src->update_geneve_udp_port = 0;
+ return;
+ }
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+ type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunn_start_params *p_tunn,
+ enum ecore_mf_mode mode,
+ bool allow_npar_tx_switch)
+{
+ struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
+ u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+ u8 page_cnt;
+
+ /* update initial eq producer */
+ ecore_eq_prod_update(p_hwfn,
+ ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+ /* Initialize the SPQ entry for the ramrod */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Fill the ramrod data */
+ p_ramrod = &p_ent->ramrod.pf_start;
+ p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
+ p_ramrod->event_ring_sb_index = sb_index;
+ p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
+ p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+ /* For easier debugging */
+ p_ramrod->dont_log_ramrods = 0;
+ p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
+
+ switch (mode) {
+ case ECORE_MF_DEFAULT:
+ case ECORE_MF_NPAR:
+ p_ramrod->mf_mode = MF_NPAR;
+ break;
+ case ECORE_MF_OVLAN:
+ p_ramrod->mf_mode = MF_OVLAN;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Unsupported MF mode, init as DEFAULT\n");
+ p_ramrod->mf_mode = MF_NPAR;
+ }
+
+ /* Place EQ address in RAMROD */
+ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+ p_hwfn->p_eq->chain.pbl.p_phys_table);
+ page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+ p_ramrod->event_ring_num_pages = page_cnt;
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+ p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+ ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
+ &p_ramrod->tunnel_config);
+
+ if (IS_MF_SI(p_hwfn))
+ p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ p_ramrod->personality = PERSONALITY_ETH;
+ }
+
+ p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf;
+ p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
+ sb, sb_index, p_ramrod->outer_tag);
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+ if (p_tunn) {
+ ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->tunn_mode);
+ p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+ &p_ent->ramrod.pf_update);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* Set pf update ramrod command params */
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
+ &p_ent->ramrod.pf_update.tunnel_config);
+
+ rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+ if ((rc == ECORE_SUCCESS) && p_tunn) {
+ if (p_tunn->update_vxlan_udp_port)
+ ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_udp_port);
+ if (p_tunn->update_geneve_udp_port)
+ ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_udp_port);
+
+ ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->tunn_mode);
+ p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
new file mode 100644
index 00000000..e281ab0e
--- /dev/null
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SP_COMMANDS_H__
+#define __ECORE_SP_COMMANDS_H__
+
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "ecore_sp_api.h"
+
+#define ECORE_SP_EQ_COMPLETION 0x01
+#define ECORE_SP_CQE_COMPLETION 0x02
+
+struct ecore_sp_init_data {
+ /* The CID and FID aren't necessarily derived from hwfn,
+	 * e.g., in IOV scenarios. CID might differ between SPQ and
+ * other elements.
+ */
+ u32 cid;
+ u16 opaque_fid;
+
+ /* Information regarding operation upon sending & completion */
+ enum spq_mode comp_mode;
+ struct ecore_spq_comp_cb *p_comp_data;
+
+};
+
+/**
+ * @brief Acquire and initialize an SPQ entry for a given ramrod.
+ *
+ * @param p_hwfn
+ * @param pp_ent - will be filled with a pointer to an entry upon success
+ * @param cmd - dependent upon protocol
+ * @param protocol
+ * @param p_data - various configuration required for ramrod
+ *
+ * @return ECORE_SUCCESS upon success, otherwise failure.
+ */
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent,
+ u8 cmd,
+ u8 protocol,
+ struct ecore_sp_init_data *p_data);
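+
+/*
+ * Typical call sequence, as used by the slowpath commands in
+ * ecore_sp_commands.c:
+ *
+ *   OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ *   init_data.cid = ecore_spq_get_cid(p_hwfn);
+ *   init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ *   init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+ *   rc = ecore_sp_init_request(p_hwfn, &p_ent, cmd, protocol, &init_data);
+ */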
+
+/**
+ * @brief ecore_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf start tunneling configuration
+ * @param mode
+ * @param allow_npar_tx_switch - whether NPAR tx switching should be used
+ *	  for vports configured for tx-switching.
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunn_start_params *p_tunn,
+ enum ecore_mf_mode mode,
+ bool allow_npar_tx_switch);
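+
+/*
+ * Minimal usage sketch (illustrative): p_tunn may be OSAL_NULL when no
+ * tunneling configuration is required (see
+ * ecore_tunn_set_pf_start_params()):
+ *
+ *   rc = ecore_sp_pf_start(p_hwfn, OSAL_NULL, ECORE_MF_DEFAULT, false);
+ */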
+
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
+ * update Ramrod
+ *
+ * This ramrod is sent to update a tunneling configuration
+ * for a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf update tunneling parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - callback function
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
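+
+/*
+ * Usage sketch (illustrative; the UDP port value is an example and the
+ * remaining tunnel fields are deliberately left zeroed, which preserves
+ * the cached tunnel modes):
+ *
+ *   struct ecore_tunn_update_params tunn;
+ *
+ *   OSAL_MEMSET(&tunn, 0, sizeof(tunn));
+ *   tunn.update_vxlan_udp_port = 1;
+ *   tunn.vxlan_udp_port = 4789;
+ *   rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ *                                    ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
+ */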
+
+/**
+ * @brief ecore_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PF's Event Ring. This ramrod
+ * also deletes the context for the slow hwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn);
+
+#endif /*__ECORE_SP_COMMANDS_H__*/
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
new file mode 100644
index 00000000..b263693f
--- /dev/null
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -0,0 +1,943 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_sp_api.h"
+#include "ecore_spq.h"
+#include "ecore_iro.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_int.h"
+#include "ecore_dev_api.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+#include "ecore_sriov.h"
+
+/***************************************************************************
+ * Structures & Definitions
+ ***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
+#define SPQ_BLOCK_SLEEP_LENGTH (1000)
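+
+/* With OSAL_MSLEEP(5) per iteration in ecore_spq_block(), each polling
+ * attempt of SPQ_BLOCK_SLEEP_LENGTH iterations lasts roughly 5 seconds
+ * before the MCP drain retry kicks in.
+ */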
+
+/***************************************************************************
+ * Blocking Implementation (BLOCK/EBLOCK mode)
+ ***************************************************************************/
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct ecore_spq_comp_done *comp_done;
+
+ comp_done = (struct ecore_spq_comp_done *)cookie;
+
+ comp_done->done = 0x1;
+ comp_done->fw_return_code = fw_return_code;
+
+ /* make update visible to waiting thread */
+ OSAL_SMP_WMB(p_hwfn->p_dev);
+}
+
+static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *p_fw_ret)
+{
+ int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ struct ecore_spq_comp_done *comp_done;
+ enum _ecore_status_t rc;
+
+ comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
+ while (sleep_count) {
+ OSAL_POLL_MODE_DPC(p_hwfn);
+		/* validate we received a completion update */
+ OSAL_SMP_RMB(p_hwfn->p_dev);
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return ECORE_SUCCESS;
+ }
+ OSAL_MSLEEP(5);
+ sleep_count--;
+ }
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
+
+ /* Retry after drain */
+ sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+ while (sleep_count) {
+		/* validate we received a completion update */
+ OSAL_SMP_RMB(p_hwfn->p_dev);
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return ECORE_SUCCESS;
+ }
+ OSAL_MSLEEP(5);
+ sleep_count--;
+ }
+
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return ECORE_SUCCESS;
+ }
+
+ DP_NOTICE(p_hwfn, true,
+ "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
+ OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
+ p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
+ OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+
+ return ECORE_BUSY;
+}
+
+/***************************************************************************
+ * SPQ entries inner API
+ ***************************************************************************/
+static enum _ecore_status_t
+ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
+{
+ p_ent->flags = 0;
+
+ switch (p_ent->comp_mode) {
+ case ECORE_SPQ_MODE_EBLOCK:
+ case ECORE_SPQ_MODE_BLOCK:
+ p_ent->comp_cb.function = ecore_spq_blocking_cb;
+ break;
+ case ECORE_SPQ_MODE_CB:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
+ " Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
+ D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+ ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * HSI access
+ ***************************************************************************/
+static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq *p_spq)
+{
+ u16 pq;
+ struct ecore_cxt_info cxt_info;
+ struct core_conn_context *p_cxt;
+ union ecore_qm_pq_params pq_params;
+ enum _ecore_status_t rc;
+
+ cxt_info.iid = p_spq->cid;
+
+ rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+ if (rc < 0) {
+		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
+ p_spq->cid);
+ return;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ * XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+ */
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+ /* CDU validation - FIXME currently disabled */
+
+ /* QM physical queue */
+ OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+ pq_params.core.tc = LB_TC;
+ pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+
+ p_cxt->xstorm_st_context.spq_base_lo =
+ DMA_LO_LE(p_spq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.spq_base_hi =
+ DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+ p_cxt->xstorm_st_context.consolid_base_addr.lo =
+ DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.consolid_base_addr.hi =
+ DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq *p_spq,
+ struct ecore_spq_entry *p_ent)
+{
+ struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+ u16 echo = ecore_chain_get_prod_idx(p_chain);
+ struct slow_path_element *elem;
+ struct core_db_data db;
+
+ p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
+ elem = ecore_chain_produce(p_chain);
+ if (!elem) {
+ DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
+ return ECORE_INVAL;
+ }
+
+ *elem = p_ent->elem; /* struct assignment */
+
+ /* send a doorbell on the slow hwfn session */
+ OSAL_MEMSET(&db, 0, sizeof(db));
+ SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+	/* validate producer is up-to-date */
+ OSAL_RMB(p_hwfn->p_dev);
+
+ db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+
+ /* do not reorder */
+ OSAL_BARRIER(p_hwfn->p_dev);
+
+ DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+	/* make sure doorbell is rung */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
+ " agg_params: %02x, prod: %04x\n",
+ DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
+ db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+
+ return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Asynchronous events
+ ***************************************************************************/
+
+static enum _ecore_status_t
+ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ switch (p_eqe->protocol_id) {
+ case PROTOCOLID_COMMON:
+ return ecore_sriov_eqe_event(p_hwfn,
+ p_eqe->opcode,
+ p_eqe->echo, &p_eqe->data);
+ default:
+ DP_NOTICE(p_hwfn,
+ true, "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return ECORE_INVAL;
+ }
+}
+
+/***************************************************************************
+ * EQ API
+ ***************************************************************************/
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
+{
+ u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ OSAL_MMIOWB(p_hwfn->p_dev);
+}
+
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+ void *cookie)
+{
+ struct ecore_eq *p_eq = cookie;
+ struct ecore_chain *p_chain = &p_eq->chain;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* take a snapshot of the FW consumer */
+ u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+	/* Need to guarantee the fw_cons index we use points to a usable
+	 * element (skipping any unusable page-boundary elements), so that
+	 * the chain macros stay in agreement with the FW consumer.
+	 */
+ if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
+ ecore_chain_get_usable_per_page(p_chain)) {
+ fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
+ }
+
+ /* Complete current segment of eq entries */
+ while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
+ struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
+ if (!p_eqe) {
+ rc = ECORE_INVAL;
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "op %x prot %x res0 %x echo %x "
+ "fwret %x flags %x\n", p_eqe->opcode,
+ p_eqe->protocol_id, /* Event Protocol ID */
+ p_eqe->reserved0, /* Reserved */
+ OSAL_LE16_TO_CPU(p_eqe->echo),
+ p_eqe->fw_return_code, /* FW return code for SP
+ * ramrods
+ */
+ p_eqe->flags);
+
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+ if (ecore_async_event_completion(p_hwfn, p_eqe))
+ rc = ECORE_INVAL;
+ } else if (ecore_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data)) {
+ rc = ECORE_INVAL;
+ }
+
+ ecore_chain_recycle_consumed(p_chain);
+ }
+
+ ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
+
+ return rc;
+}
+
+struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+{
+ struct ecore_eq *p_eq;
+
+ /* Allocate EQ struct */
+ p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
+ if (!p_eq) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_eq'\n");
+ return OSAL_NULL;
+ }
+
+ /* Allocate and initialize EQ chain */
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ num_elem,
+ sizeof(union event_ring_element), &p_eq->chain)) {
+		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
+ goto eq_allocate_fail;
+ }
+
+ /* register EQ completion on the SP SB */
+ ecore_int_register_cb(p_hwfn,
+ ecore_eq_completion,
+ p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
+
+ return p_eq;
+
+eq_allocate_fail:
+ ecore_eq_free(p_hwfn, p_eq);
+ return OSAL_NULL;
+}
+
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+{
+ ecore_chain_reset(&p_eq->chain);
+}
+
+void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+{
+ if (!p_eq)
+ return;
+ ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_eq);
+}
+
+/***************************************************************************
+ * CQE API - manipulate EQ functionality
+ ***************************************************************************/
+static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe
+ *cqe,
+ enum protocol_type protocol)
+{
+ if (IS_VF(p_hwfn->p_dev))
+ return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
+
+ /* @@@tmp - it's possible we'll eventually want to handle some
+ * actual commands that can arrive here, but for now this is only
+ * used to complete the ramrod using the echo value on the cqe
+ */
+ return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+ cqe->ramrod_cmd_id);
+ }
+
+ return rc;
+}
+
+/***************************************************************************
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_virt = OSAL_NULL;
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ dma_addr_t p_phys = 0;
+ u32 i, capacity;
+
+ OSAL_LIST_INIT(&p_spq->pending);
+ OSAL_LIST_INIT(&p_spq->completion_pending);
+ OSAL_LIST_INIT(&p_spq->free_pool);
+ OSAL_LIST_INIT(&p_spq->unlimited_pending);
+ OSAL_SPIN_LOCK_INIT(&p_spq->lock);
+
+ /* SPQ empty pool */
+ p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
+ p_virt = p_spq->p_virt;
+
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ for (i = 0; i < capacity; i++) {
+ p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+ p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+ OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
+
+ p_virt++;
+ p_phys += sizeof(struct ecore_spq_entry);
+ }
+
+ /* Statistics */
+ p_spq->normal_count = 0;
+ p_spq->comp_count = 0;
+ p_spq->comp_sent_count = 0;
+ p_spq->unlimited_pending_count = 0;
+
+ OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
+ SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
+ p_spq->comp_bitmap_idx = 0;
+
+ /* SPQ cid, cannot fail */
+ ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+ ecore_spq_hw_initialize(p_hwfn, p_spq);
+
+ /* reset the chain itself */
+ ecore_chain_reset(&p_spq->chain);
+}
+
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_virt = OSAL_NULL;
+ struct ecore_spq *p_spq = OSAL_NULL;
+ dma_addr_t p_phys = 0;
+ u32 capacity;
+
+ /* SPQ struct */
+ p_spq =
+ OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
+ if (!p_spq) {
+ DP_NOTICE(p_hwfn, true,
+			  "Failed to allocate `struct ecore_spq'\n");
+ return ECORE_NOMEM;
+ }
+
+ /* SPQ ring */
+ if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
+ /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element), &p_spq->chain)) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
+ goto spq_allocate_fail;
+ }
+
+ /* allocate and fill the SPQ elements (incl. ramrod data list) */
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
+ capacity *
+ sizeof(struct ecore_spq_entry));
+ if (!p_virt)
+ goto spq_allocate_fail;
+
+ p_spq->p_virt = p_virt;
+ p_spq->p_phys = p_phys;
+
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+
+ p_hwfn->p_spq = p_spq;
+ return ECORE_SUCCESS;
+
+spq_allocate_fail:
+ ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_spq);
+ return ECORE_NOMEM;
+}
+
+void ecore_spq_free(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ u32 capacity;
+
+ if (!p_spq)
+ return;
+
+ if (p_spq->p_virt) {
+ capacity = ecore_chain_get_capacity(&p_spq->chain);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_spq->p_virt,
+ p_spq->p_phys,
+ capacity *
+ sizeof(struct ecore_spq_entry));
+ }
+
+ ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+ OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+ OSAL_FREE(p_hwfn->p_dev, p_spq);
+}
+
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+
+ if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ sizeof(struct ecore_spq_entry));
+ if (!p_ent) {
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate an SPQ entry"
+ " for a pending ramrod\n");
+ return ECORE_NOMEM;
+ }
+ p_ent->queue = &p_spq->unlimited_pending;
+ } else {
+ p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+ struct ecore_spq_entry, list);
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
+ p_ent->queue = &p_spq->pending;
+ }
+
+ *pp_ent = p_ent;
+
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ return ECORE_SUCCESS;
+}
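+
+/* Minimal posting sketch (illustration only; cmd_id/protocol_id are
+ * placeholders, and real callers also fill the ramrod data and priority
+ * before posting):
+ *
+ *	struct ecore_spq_entry *p_ent;
+ *	u8 fw_ret;
+ *
+ *	if (ecore_spq_get_entry(p_hwfn, &p_ent) == ECORE_SUCCESS) {
+ *		p_ent->elem.hdr.cmd_id = cmd_id;
+ *		p_ent->elem.hdr.protocol_id = protocol_id;
+ *		p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
+ *		ecore_spq_post(p_hwfn, p_ent, &fw_ret);
+ *	}
+ */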
+
+/* Locked variant; should be called while the SPQ lock is taken */
+static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent)
+{
+ OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent)
+{
+ OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief ecore_spq_add_entry - adds a new entry to the pending
+ * list. Should be called while the SPQ lock is held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent, enum spq_priority priority)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ OSAL_LIST_PUSH_TAIL(&p_ent->list,
+ &p_spq->unlimited_pending);
+ p_spq->unlimited_pending_count++;
+
+ return ECORE_SUCCESS;
+ }
+
+ struct ecore_spq_entry *p_en2;
+
+ p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+ struct ecore_spq_entry,
+ list);
+ OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
+
+ /* Copy the ring element physical pointer to the new
+ * entry, since we are about to overwrite the entire ring
+ * entry and don't want to lose the pointer.
+ */
+ p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+
+ /* Setting the cookie to the comp_done of the
+ * new element.
+ */
+ if (p_ent->comp_cb.cookie == &p_ent->comp_done)
+ p_ent->comp_cb.cookie = &p_en2->comp_done;
+
+ *p_en2 = *p_ent;
+
+ OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+ p_ent = p_en2;
+ }
+
+ /* entry is to be placed in 'pending' queue */
+ switch (priority) {
+ case ECORE_SPQ_PRIORITY_NORMAL:
+ OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
+ p_spq->normal_count++;
+ break;
+ case ECORE_SPQ_PRIORITY_HIGH:
+ OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
+ p_spq->high_count++;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
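+
+/* Worked example of the flow above: an entry whose 'queue' points at
+ * unlimited_pending was heap-allocated by ecore_spq_get_entry() when the
+ * free_pool was empty. If the pool is still empty it just queues there;
+ * if a pool entry has since been returned, the ramrod is copied into
+ * that pool entry (preserving its ring-element DMA pointer) and the heap
+ * copy is freed, so only pool-backed entries ever reach 'pending'.
+ */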
+
+/***************************************************************************
+ * Accessor
+ ***************************************************************************/
+
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_spq)
+ return 0xffffffff; /* illegal */
+ return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+ * Posting new Ramrods
+ ***************************************************************************/
+
+static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
+ osal_list_t *head,
+ u32 keep_reserve)
+{
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ enum _ecore_status_t rc;
+
+ /* TODO - implementation might be wasteful; will always keep room
+ * for an additional high priority ramrod (even if one is already
+ * pending in FW)
+ */
+ while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+ !OSAL_LIST_IS_EMPTY(head)) {
+ struct ecore_spq_entry *p_ent =
+ OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
+ OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+ &p_spq->completion_pending);
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
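+
+/* Worked example (assuming a reserve of one element): with keep_reserve
+ * == 1 and a 128-element chain, posting stops once a single ring element
+ * remains, so a later high-priority ramrod pushed to the head of
+ * 'pending' can still be sent without waiting for a completion.
+ */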
+
+static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+
+ while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+ if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
+ break;
+
+ p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
+ struct ecore_spq_entry, list);
+ if (!p_ent)
+ return ECORE_INVAL;
+
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
+
+ ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ }
+
+ rc = ecore_spq_post_list(p_hwfn,
+ &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
+ if (rc)
+ return rc;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
+ bool b_ret_ent = true;
+
+ if (!p_hwfn)
+ return ECORE_INVAL;
+
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
+ return ECORE_INVAL;
+ }
+
+ if (p_hwfn->p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Recovery is in progress -> skip spq post"
+ " [cmd %02x protocol %02x]",
+ p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+ /* Return success to let the flow complete successfully
+ * w/o any error handling.
+ */
+ return ECORE_SUCCESS;
+ }
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+
+ /* Complete the entry */
+ rc = ecore_spq_fill_entry(p_hwfn, p_ent);
+
+ /* Check return value after LOCK is taken for cleaner error flow */
+ if (rc)
+ goto spq_post_fail;
+
+ /* Add the request to the pending queue */
+ rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ if (rc)
+ goto spq_post_fail;
+
+ rc = ecore_spq_pend_post(p_hwfn);
+ if (rc) {
+ /* Since it's possible that pending failed for a different
+ * entry [although unlikely], the failed entry was already
+ * dealt with; no need to return it here.
+ */
+ b_ret_ent = false;
+ goto spq_post_fail;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
+ /* For entries in ECORE BLOCK mode, the completion code cannot
+ * perform the necessary cleanup - if it did, we couldn't
+ * access p_ent here to see whether it's successful or not.
+ * Thus, after gaining the answer, perform the cleanup here.
+ */
+ rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
+ if (rc)
+ goto spq_post_fail2;
+
+ /* return to pool */
+ ecore_spq_return_entry(p_hwfn, p_ent);
+ }
+ return rc;
+
+spq_post_fail2:
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
+ ecore_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+ /* return to the free pool */
+ if (b_ret_ent)
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data)
+{
+ struct ecore_spq *p_spq;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_spq_entry *tmp;
+ struct ecore_spq_entry *found = OSAL_NULL;
+ enum _ecore_status_t rc;
+
+ if (!p_hwfn)
+ return ECORE_INVAL;
+
+ p_spq = p_hwfn->p_spq;
+ if (!p_spq)
+ return ECORE_INVAL;
+
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
+ tmp,
+ &p_spq->completion_pending,
+ list, struct ecore_spq_entry) {
+ if (p_ent->elem.hdr.echo == echo) {
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+ &p_spq->completion_pending);
+
+ /* Avoid overwriting SPQ entries on out-of-order
+ * completions by marking them in a bitmap and
+ * advancing the chain consumer only over the first
+ * run of successively completed entries.
+ */
+ SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
+ while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
+ p_spq->comp_bitmap_idx)) {
+ SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
+ p_spq->comp_bitmap_idx);
+ p_spq->comp_bitmap_idx++;
+ ecore_chain_return_produced(&p_spq->chain);
+ }
+
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+ }
+
+ /* This is debug and should be relatively uncommon - depends
+ * on scenarios which have multiple per-PF sent ramrods.
+ */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Got completion for echo %04x - doesn't match"
+ " echo %04x in completion pending list\n",
+ OSAL_LE16_TO_CPU(echo),
+ OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+ }
+
+ /* Release lock before callback, as callback may post
+ * an additional ramrod.
+ */
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ if (!found) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to find an entry this"
+ " EQE [echo %04x] completes\n",
+ OSAL_LE16_TO_CPU(echo));
+ return ECORE_EXISTS;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Complete EQE [echo %04x]: func %p cookie %p\n",
+ OSAL_LE16_TO_CPU(echo),
+ found->comp_cb.function, found->comp_cb.cookie);
+ if (found->comp_cb.function)
+ found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+ fw_return_code);
+
+ if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
+ /* EBLOCK is responsible for freeing its own entry */
+ ecore_spq_return_entry(p_hwfn, found);
+ }
+
+ /* Attempt to post pending requests */
+ OSAL_SPIN_LOCK(&p_spq->lock);
+ rc = ecore_spq_pend_post(p_hwfn);
+ OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+ return rc;
+}
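+
+/* Worked example of the out-of-order handling above (echo values
+ * assumed, comp_bitmap_idx starting at 0): if completions arrive as
+ * echo 2, 0, 1, then echo 2 only sets bit 2 and returns nothing (bit 0
+ * is still clear); echo 0 clears bit 0 and returns one element
+ * (comp_bitmap_idx -> 1); echo 1 completes the run, clearing bits 1 and
+ * 2 and returning two more elements (comp_bitmap_idx -> 3).
+ */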
+
+struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_consq *p_consq;
+
+ /* Allocate ConsQ struct */
+ p_consq =
+ OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
+ if (!p_consq) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_consq'\n");
+ return OSAL_NULL;
+ }
+
+ /* Allocate and initialize EQ chain */
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ ECORE_CHAIN_PAGE_SIZE / 0x80,
+ 0x80, &p_consq->chain)) {
+ DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
+ goto consq_allocate_fail;
+ }
+
+ return p_consq;
+
+consq_allocate_fail:
+ ecore_consq_free(p_hwfn, p_consq);
+ return OSAL_NULL;
+}
+
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+{
+ ecore_chain_reset(&p_consq->chain);
+}
+
+void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+{
+ if (!p_consq)
+ return;
+ ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_consq);
+}
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
new file mode 100644
index 00000000..5c168654
--- /dev/null
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SPQ_H__
+#define __ECORE_SPQ_H__
+
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_sp_api.h"
+
+union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
+};
+
+#define EQ_MAX_CREDIT 0xffffffff
+
+enum spq_priority {
+ ECORE_SPQ_PRIORITY_NORMAL,
+ ECORE_SPQ_PRIORITY_HIGH,
+};
+
+union ecore_spq_req_comp {
+ struct ecore_spq_comp_cb cb;
+ u64 *done_addr;
+};
+
+/* SPQ_MODE_EBLOCK */
+struct ecore_spq_comp_done {
+ u64 done;
+ u8 fw_return_code;
+};
+
+struct ecore_spq_entry {
+ osal_list_entry_t list;
+
+ u8 flags;
+
+ /* HSI slow path element */
+ struct slow_path_element elem;
+
+ union ramrod_data ramrod;
+
+ enum spq_priority priority;
+
+ /* pending queue for this entry */
+ osal_list_t *queue;
+
+ enum spq_mode comp_mode;
+ struct ecore_spq_comp_cb comp_cb;
+ struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct ecore_eq {
+ struct ecore_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
+};
+
+struct ecore_consq {
+ struct ecore_chain chain;
+};
+
+struct ecore_spq {
+ osal_spinlock_t lock;
+
+ osal_list_t unlimited_pending;
+ osal_list_t pending;
+ osal_list_t completion_pending;
+ osal_list_t free_pool;
+
+ struct ecore_chain chain;
+
+ /* allocated dma-able memory for spq entries (+ramrod data) */
+ dma_addr_t p_phys;
+ struct ecore_spq_entry *p_virt;
+
+ /* Bitmap for handling out-of-order completions */
+#define SPQ_RING_SIZE \
+ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+#define SPQ_COMP_BMAP_SIZE \
+(SPQ_RING_SIZE / (sizeof(unsigned long) * 8 /* BITS_PER_LONG */))
+ unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
+ u8 comp_bitmap_idx;
+#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \
+(OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \
+(OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \
+(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
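+
+ /* Sizing sketch (illustrative values, not taken from the HSI): if
+ * CORE_SPQE_PAGE_SIZE_BYTES were 4096 and a slow_path_element 32
+ * bytes, SPQ_RING_SIZE would be 128 and, on an LP64 host,
+ * SPQ_COMP_BMAP_SIZE would be 128 / 64 == 2 longs. Echo values are
+ * taken modulo SPQ_RING_SIZE, so the bitmap wraps with the ring.
+ */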
+
+ /* Statistics */
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
+
+ u32 cid;
+};
+
+struct ecore_port;
+struct ecore_hwfn;
+
+/**
+ * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or, lacking
+ * room, pends it on the pending list.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param fw_return_code
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+/**
+ * @brief ecore_spq_alloc - Allocates & initializes the SPQ.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_get_entry - Obtain an entry from the spq
+ * free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent);
+
+/**
+ * @brief ecore_spq_return_entry - Return an entry to spq free
+ * pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent);
+/**
+ * @brief ecore_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ */
+struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
+
+/**
+ * @brief ecore_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+
+/**
+ * @brief ecore_eq_free - Deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+
+/**
+ * @brief ecore_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod);
+
+/**
+ * @brief ecore_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+ void *cookie);
+
+/**
+ * @brief ecore_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param fw_return_code - FW return code passed to the completion callback
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
+
+/**
+ * @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_alloc - Allocates & initializes a ConsQ
+ * struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct ecore_consq* - a newly allocated structure; NULL upon error.
+ */
+struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_setup - Reset the ConsQ to its start
+ * state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+
+/**
+ * @brief ecore_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+
+#endif /* __ECORE_SPQ_H__ */
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
new file mode 100644
index 00000000..1b3119d2
--- /dev/null
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -0,0 +1,3422 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "reg_addr.h"
+#include "ecore_sriov.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_hw_defs.h"
+#include "ecore_int.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_l2.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_mcp.h"
+#include "ecore_cxt.h"
+#include "ecore_vf.h"
+#include "ecore_init_fw_funcs.h"
+
+/* TEMPORARY until we implement print_enums... */
+const char *ecore_channel_tlvs_string[] = {
+ "CHANNEL_TLV_NONE", /* ends tlv sequence */
+ "CHANNEL_TLV_ACQUIRE",
+ "CHANNEL_TLV_VPORT_START",
+ "CHANNEL_TLV_VPORT_UPDATE",
+ "CHANNEL_TLV_VPORT_TEARDOWN",
+ "CHANNEL_TLV_START_RXQ",
+ "CHANNEL_TLV_START_TXQ",
+ "CHANNEL_TLV_STOP_RXQ",
+ "CHANNEL_TLV_STOP_TXQ",
+ "CHANNEL_TLV_UPDATE_RXQ",
+ "CHANNEL_TLV_INT_CLEANUP",
+ "CHANNEL_TLV_CLOSE",
+ "CHANNEL_TLV_RELEASE",
+ "CHANNEL_TLV_LIST_END",
+ "CHANNEL_TLV_UCAST_FILTER",
+ "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
+ "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
+ "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
+ "CHANNEL_TLV_VPORT_UPDATE_MCAST",
+ "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
+ "CHANNEL_TLV_VPORT_UPDATE_RSS",
+ "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
+ "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+ "CHANNEL_TLV_MAX"
+};
+
+/* TODO - this is linux crc32; need a way to ifdef it out for linux */
+u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
+{
+ int i;
+
+ while (length--) {
+ crc ^= *ptr++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+ }
+ return crc;
+}
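+
+/* Note: this is the reflected CRC-32 polynomial 0xedb88320 (IEEE 802.3)
+ * computed bit by bit. The seed is used as-is and there is no final XOR,
+ * so a caller wanting the conventional CRC-32 would have to apply the
+ * usual 0xffffffff pre/post conditioning itself; the bulletin code below
+ * deliberately seeds with 0 instead.
+ */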
+
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+ int vfid,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_bulletin_content *p_bulletin;
+ struct ecore_dmae_params params;
+ struct ecore_vf_info *p_vf;
+ int crc_size = sizeof(p_bulletin->crc);
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf)
+ return ECORE_INVAL;
+
+ /* TODO - check VF is in a state where it can accept message */
+ if (!p_vf->vf_bulletin)
+ return ECORE_INVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ /* Increment bulletin board version and compute crc */
+ p_bulletin->version++;
+ p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+ p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+ /* propagate bulletin board via dmae to vm memory */
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.flags = ECORE_DMAE_FLAG_VF_DST;
+ params.dst_vfid = p_vf->abs_vf_id;
+ return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+ p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+ &params);
+}
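+
+/* VF-side counterpart, as a sketch under the same layout (p_copy and
+ * last_seen are hypothetical names; the actual VF code lives elsewhere):
+ *
+ *	crc = ecore_crc32(0, (u8 *)p_copy + sizeof(p_copy->crc),
+ *			  size - sizeof(p_copy->crc));
+ *	if (crc == p_copy->crc && p_copy->version != last_seen)
+ *		(accept the new bulletin image)
+ */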
+
+static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
+{
+ struct ecore_hw_sriov_info *iov = &p_dev->sriov_info;
+ int pos = iov->pos;
+
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_INITIAL_VF,
+ &iov->initial_vfs);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+ if (iov->num_vfs) {
+ /* @@@TODO - in future we might want to add an OSAL here to
+ * allow each OS to decide on its own how to act.
+ */
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV,
+ "Number of VFs is already set to a non-zero value."
+ " Ignoring PCI configuration value\n");
+ iov->num_vfs = 0;
+ }
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+ OSAL_PCI_READ_CONFIG_WORD(p_dev,
+ pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+ OSAL_PCI_READ_CONFIG_DWORD(p_dev,
+ pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+ OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+ OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
+ " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
+ " stride %d, page size 0x%x\n", 0,
+ iov->nres, iov->cap, iov->ctrl,
+ iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
+ iov->offset, iov->stride, iov->pgsz);
+
+ /* Some sanity checks */
+ if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
+ iov->total_vfs > NUM_OF_VFS(p_dev)) {
+ /* This can happen only due to a bug. In this case we set
+ * num_vfs to zero to avoid memory corruption in the code that
+ * assumes max number of vfs
+ */
+ DP_NOTICE(p_dev, false,
+ "IOV: Unexpected number of vfs set: %d;"
+ " setting num_vfs to zero\n",
+ iov->num_vfs);
+
+ iov->num_vfs = 0;
+ iov->total_vfs = 0;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_igu_block *p_sb;
+ u16 sb_id;
+ u32 val;
+
+ if (!p_hwfn->hw_info.p_igu_info) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
+ return;
+ }
+
+ for (sb_id = 0;
+ sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
+ p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+ if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
+ !(p_sb->status & ECORE_IGU_STATUS_PF)) {
+ val = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+ }
+ }
+}
+
+static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
+ struct ecore_bulletin_content *p_bulletin_virt;
+ struct ecore_pf_iov *p_iov_info;
+ dma_addr_t req_p, rply_p, bulletin_p;
+ u8 idx = 0;
+
+ p_iov_info = p_hwfn->pf_iov_info;
+
+ OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+ p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+ req_p = p_iov_info->mbx_msg_phys_addr;
+ p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+ rply_p = p_iov_info->mbx_reply_phys_addr;
+ p_bulletin_virt = p_iov_info->p_bulletins;
+ bulletin_p = p_iov_info->bulletins_phys;
+ if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_setup_vfdb called without alloc mem first\n");
+ return;
+ }
+
+ p_iov_info->base_vport_id = 1; /* @@@TBD resource allocation */
+
+ for (idx = 0; idx < num_vfs; idx++) {
+ struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
+ u32 concrete;
+
+ vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+ vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+ vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+ vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
+ vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
+#endif
+ vf->state = VF_STOPPED;
+
+ vf->bulletin.phys = idx *
+ sizeof(struct ecore_bulletin_content) + bulletin_p;
+ vf->bulletin.p_virt = p_bulletin_virt + idx;
+ vf->bulletin.size = sizeof(struct ecore_bulletin_content);
+
+ vf->relative_vf_id = idx;
+ vf->abs_vf_id = idx + p_hwfn->hw_info.first_vf_in_pf;
+ concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+ vf->concrete_fid = concrete;
+ /* TODO - need to devise a better way of getting opaque */
+ vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+ (vf->abs_vf_id << 8);
+ /* @@TBD MichalK - add base vport_id of VFs to equation */
+ vf->vport_id = p_iov_info->base_vport_id + idx;
+ }
+}
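+
+/* Worked example for the opaque_fid derivation above (values assumed):
+ * with a PF opaque_fid whose low byte is 0x00 and abs_vf_id == 5, the
+ * VF opaque_fid becomes (0x00 | (5 << 8)) == 0x0500, i.e. the VF index
+ * rides in the high byte and the PF identity in the low byte.
+ */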
+
+static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ void **p_v_addr;
+ u16 num_vfs = 0;
+
+ num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+ /* Allocate PF Mailbox buffer (per-VF) */
+ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_msg_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ /* Allocate PF Mailbox Reply buffer (per-VF) */
+ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->mbx_reply_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
+ num_vfs;
+ p_v_addr = &p_iov_info->p_bulletins;
+ *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov_info->bulletins_phys,
+ p_iov_info->bulletins_size);
+ if (!*p_v_addr)
+ return ECORE_NOMEM;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF's Requests mailbox [%p virt 0x%" PRIx64 " phys], "
+ "Response mailbox [%p virt 0x%" PRIx64 " phys] Bulletins"
+ " [%p virt 0x%" PRIx64 " phys]\n",
+ p_iov_info->mbx_msg_virt_addr,
+ (u64)p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_reply_virt_addr,
+ (u64)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
+
+ /* @@@TBD MichalK - statistics / RSS */
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+ if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->mbx_msg_virt_addr,
+ p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_msg_size);
+
+ if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->mbx_reply_virt_addr,
+ p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->mbx_reply_size);
+
+ if (p_iov_info->p_bulletins)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov_info->p_bulletins,
+ p_iov_info->bulletins_phys,
+ p_iov_info->bulletins_size);
+
+ /* @@@TBD MichalK - statistics / RSS */
+}
+
+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_pf_iov *p_sriov;
+
+ if (!IS_PF_SRIOV(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No SR-IOV - no need for IOV db\n");
+ return rc;
+ }
+
+ p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
+ if (!p_sriov) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_pf_iov'");
+ return ECORE_NOMEM;
+ }
+
+ p_hwfn->pf_iov_info = p_sriov;
+
+ rc = ecore_iov_allocate_vfdb(p_hwfn);
+
+ return rc;
+}
+
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ if (!IS_PF_SRIOV(p_hwfn) || !p_hwfn->pf_iov_info)
+ return;
+
+ ecore_iov_setup_vfdb(p_hwfn);
+ ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+void ecore_iov_free(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->pf_iov_info) {
+ ecore_iov_free_vfdb(p_hwfn);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
+ }
+}
+
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc;
+
+ /* @@@ TBD get this information from shmem / pci cfg */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ /* First hwfn should learn the PCI configuration */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ int *pos = &p_hwfn->p_dev->sriov_info.pos;
+
+ *pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!*pos) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No PCIe IOV support\n");
+ return ECORE_SUCCESS;
+ }
+
+ rc = ecore_iov_pci_cfg_info(p_dev);
+ if (rc)
+ return rc;
+ } else if (!p_hwfn->p_dev->sriov_info.pos) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* Calculate the first VF index - this is a bit tricky; basically,
+ * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
+ * after the first engine's VFs.
+ */
+ p_hwfn->hw_info.first_vf_in_pf = p_hwfn->p_dev->sriov_info.offset +
+ p_hwfn->abs_pf_id - 16;
+ if (ECORE_PATH_ID(p_hwfn))
+ p_hwfn->hw_info.first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "First VF in hwfn 0x%08x\n", p_hwfn->hw_info.first_vf_in_pf);
+
+ return ECORE_SUCCESS;
+}
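+
+/* Worked example (values assumed): with the usual SR-IOV offset of 16
+ * and abs_pf_id == 2, first_vf_in_pf == 16 + 2 - 16 == 2. On the second
+ * engine (ECORE_PATH_ID != 0), MAX_NUM_VFS_BB is subtracted so the index
+ * is relative to that engine's own VF range.
+ */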
+
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+ return OSAL_NULL;
+ }
+
+ if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
+ relative_vf_id);
+
+ return vf;
+}
+
+void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id, u8 to_disable)
+{
+ struct ecore_vf_info *vf;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ return;
+
+ vf->to_disable = to_disable;
+}
+
+void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable)
+{
+ u16 i;
+
+ for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
+ ecore_iov_set_vf_to_disable(p_hwfn, i, to_disable);
+}
+
+#ifndef LINUX_REMOVE
+/* @@@TBD Consider taking outside of ecore... */
+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 vf_id, void *ctx)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
+
+ if (vf != OSAL_NULL) {
+ vf->ctx = ctx;
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
+#endif
+ } else {
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+ return rc;
+}
+#endif
+
+/**
+ * VF enable primitives
+ *
+ * When pretend is required, the caller is responsible
+ * for calling pretend prior to calling these routines
+ */
+
+/* clears vf error in all semi blocks
+ * Assumption: called under VF pretend...
+ */
+static OSAL_INLINE void ecore_iov_vf_semi_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, TSEM_REG_VF_ERROR, 1);
+ ecore_wr(p_hwfn, p_ptt, USEM_REG_VF_ERROR, 1);
+ ecore_wr(p_hwfn, p_ptt, MSEM_REG_VF_ERROR, 1);
+ ecore_wr(p_hwfn, p_ptt, XSEM_REG_VF_ERROR, 1);
+ ecore_wr(p_hwfn, p_ptt, YSEM_REG_VF_ERROR, 1);
+ ecore_wr(p_hwfn, p_ptt, PSEM_REG_VF_ERROR, 1);
+}
+
+static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 abs_vfid)
+{
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+ 1 << (abs_vfid & 0x1f));
+}
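+
+/* Worked example: for abs_vfid == 37, (37 >> 5) * 4 == 4 selects the
+ * next 32-bit clear register (presumably the VF_63_32 counterpart of the
+ * register named above) and 1 << (37 & 0x1f) == 1 << 5 clears the
+ * WAS_ERROR indication for that VF.
+ */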
+
+static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ int i;
+ u16 igu_sb_id;
+
+ /* Set VF masks and configuration - pretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "value in VF_CONFIGURATION of vf %d after write %x\n",
+ vf->abs_vf_id,
+ ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ /* iterate over all queues and clear the sb consumer */
+ for (i = 0; i < vf->num_sbs; i++) {
+ igu_sb_id = vf->igu_sbs[i];
+ /* Set then clear... */
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
+ vf->opaque_fid);
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
+ vf->opaque_fid);
+ }
+}
+
+static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf, bool enable)
+{
+ u32 igu_vf_conf;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+ if (enable)
+ igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+ else
+ igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+}
+
+static enum _ecore_status_t
+ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
+{
+ u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+ enum _ecore_status_t rc;
+
+ if (vf->to_disable)
+ return ECORE_SUCCESS;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
+ ECORE_VF_ABS_ID(p_hwfn, vf));
+
+ ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
+ ECORE_VF_ABS_ID(p_hwfn, vf));
+
+ rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
+ if (rc)
+ return rc;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+ STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+ ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+ p_hwfn->hw_info.hw_mode);
+
+ /* unpretend */
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ if (vf->state != VF_STOPPED) {
+ DP_NOTICE(p_hwfn, true, "VF[%02x] is already started\n",
+ vf->abs_vf_id);
+ return ECORE_INVAL;
+ }
+
+ /* Start VF */
+ rc = ecore_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
+ vf->abs_vf_id);
+
+ vf->state = VF_FREE;
+
+ return rc;
+}
+
+/**
+ *
+ * @brief ecore_iov_config_perm_table - configure the permission
+ * zone table.
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for single engine device (256 for dual
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf, u8 enable)
+{
+ u32 reg_addr;
+ u32 val;
+ u16 qzone_id = 0;
+ int qid;
+
+ for (qid = 0; qid < vf->num_rxqs; qid++) {
+ ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+ &qzone_id);
+
+ reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
+ val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ ecore_wr(p_hwfn, p_ptt, reg_addr, val);
+ }
+}
+
+static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ /* Reset vf in IGU; interrupts are still disabled */
+ ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1 /* enable */);
+
+ /* Permission Table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true /* enable */);
+}
+
+static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf,
+ u16 num_rx_queues)
+{
+ int igu_id = 0;
+ int qid = 0;
+ u32 val = 0;
+ struct ecore_igu_block *igu_blocks =
+ p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+
+ p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+ while ((qid < num_rx_queues) &&
+ (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
+ if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
+ struct cau_sb_entry sb_entry;
+
+ vf->igu_sbs[qid] = (u16)igu_id;
+ igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+ val);
+
+ /* Configure the igu sbs in CAU which were marked valid */
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_id * sizeof(u64), 2, 0);
+ qid++;
+ }
+ igu_id++;
+ }
+
+ vf->num_sbs = (u8)num_rx_queues;
+
+ return vf->num_sbs;
+}
+
+/**
+ *
+ * @brief The function invalidates all the VF entries;
+ * technically this isn't required, but it is added for
+ * cleanliness and ease of debugging in case a VF attempts to
+ * produce an interrupt after it has been taken down.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ */
+static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ int idx, igu_id;
+ u32 addr, val;
+
+ /* Invalidate igu CAM lines and mark them as free */
+ for (idx = 0; idx < vf->num_sbs; idx++) {
+ igu_id = vf->igu_sbs[idx];
+ addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ ecore_wr(p_hwfn, p_ptt, addr, val);
+
+ p_info->igu_map.igu_blocks[igu_id].status |=
+ ECORE_IGU_STATUS_FREE;
+
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ }
+
+ vf->num_sbs = 0;
+}
+
+enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id, u16 num_rx_queues)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_vf_info *vf = OSAL_NULL;
+ u8 num_of_vf_available_chains = 0;
+ u32 cids;
+ u8 i;
+
+ if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
+ rel_vf_id);
+ return ECORE_INVAL;
+ }
+
+ vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf) {
+ DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ /* Limit number of queues according to number of CIDs */
+ ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - requesting to initialize for 0x%04x queues"
+ " [0x%04x CIDs available]\n",
+ vf->relative_vf_id, num_rx_queues, (u16)cids);
+ num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+
+ num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
+ p_ptt,
+ vf,
+ num_rx_queues);
+ if (num_of_vf_available_chains == 0) {
+ DP_ERR(p_hwfn, "no available igu sbs\n");
+ return ECORE_NOMEM;
+ }
+
+ /* Choose queue number and index ranges */
+ vf->num_rxqs = num_of_vf_available_chains;
+ vf->num_txqs = num_of_vf_available_chains;
+
+ for (i = 0; i < vf->num_rxqs; i++) {
+ u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
+ vf->igu_sbs[i]);
+
+ if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%d] would require utilizing"
+ " out-of-bounds queues - %04x\n",
+ vf->relative_vf_id, queue_id);
+ /* TODO - cleanup the already allocated SBs */
+ return ECORE_INVAL;
+ }
+
+ /* CIDs are per-VF, so no problem having them 0-based. */
+ vf->vf_queues[i].fw_rx_qid = queue_id;
+ vf->vf_queues[i].fw_tx_qid = queue_id;
+ vf->vf_queues[i].fw_cid = i;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ }
+
+ rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+
+ if (rc == ECORE_SUCCESS) {
+ struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
+ u16 vf_id = vf->relative_vf_id;
+
+ p_iov->num_vfs++;
+ p_iov->active_vfs[vf_id / 64] |= (1ULL << (vf_id % 64));
+ }
+
+ return rc;
+}
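+
+/* Worked example for the bitmap bookkeeping above: for vf_id == 70,
+ * active_vfs[70 / 64] == active_vfs[1] gets bit (70 % 64) == 6 set;
+ * ecore_iov_release_hw_for_vf() below clears the same bit.
+ */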
+
+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!vf) {
+ DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ if (vf->state != VF_STOPPED) {
+ /* Stopping the VF */
+ rc = ecore_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
+ rc);
+ return rc;
+ }
+
+ vf->state = VF_STOPPED;
+ }
+
+ /* Disabling interrupts and resetting the permission table were done
+ * during vf-close; however, we could get here without going through
+ * vf-close
+ */
+ /* Disable Interrupts for VF */
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);
+
+ /* Reset Permission table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);
+
+ vf->num_rxqs = 0;
+ vf->num_txqs = 0;
+ ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+ if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
+ struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
+ u16 vf_id = vf->relative_vf_id;
+
+ p_iov->num_vfs--;
+ p_iov->active_vfs[vf_id / 64] &= ~(1ULL << (vf_id % 64));
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static bool ecore_iov_tlv_supported(u16 tlvtype)
+{
+ return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf, u16 tlv)
+{
+ /* we don't lock the channel for unsupported tlvs */
+ if (!ecore_iov_tlv_supported(tlv))
+ return;
+
+ /* lock the channel */
+ /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
+
+ /* record the locking op */
+ /* vf->op_current = tlv; @@@TBD MichalK */
+
+ /* log the lock */
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel locked by %s\n",
+ vf->abs_vf_id, ecore_channel_tlvs_string[tlv]);
+}
+
+static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 expected_tlv)
+{
+ /* we don't unlock the channel for unsupported tlvs */
+ if (!ecore_iov_tlv_supported(expected_tlv))
+ return;
+
+ /* WARN(expected_tlv != vf->op_current,
+ * "lock mismatch: expected %s found %s",
+ * channel_tlvs_string[expected_tlv],
+ * channel_tlvs_string[vf->op_current]);
+ * @@@TBD MichalK
+ */
+
+ /* lock the channel */
+ /* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */
+
+ /* log the unlock */
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel unlocked by %s\n",
+ vf->abs_vf_id, ecore_channel_tlvs_string[expected_tlv]);
+
+ /* record the locking op */
+ /* vf->op_current = CHANNEL_TLV_NONE; */
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
+ u8 **offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+ tl->type = type;
+ tl->length = length;
+
+ /* Offset should keep pointing to next TLV (the end of the last) */
+ *offset += length;
+
+ /* Return a pointer to the start of the added tlv */
+ return *offset - length;
+}
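+
+/* Building sketch, a hypothetical two-TLV reply (the same pattern as
+ * ecore_iov_prepare_resp() further below):
+ *
+ *	u8 *offset = (u8 *)mbx->reply_virt;
+ *	struct pfvf_def_resp_tlv *resp =
+ *		ecore_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE,
+ *			      sizeof(*resp));
+ *	ecore_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
+ *		      sizeof(struct channel_list_end_tlv));
+ */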
+
+/* list the types and lengths of the tlvs on the buffer */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
+{
+ u16 i = 1, total_length = 0;
+ struct channel_tlv *tlv;
+
+ do {
+ /* cast current tlv list entry to channel tlv header */
+ tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+ /* output tlv */
+ if (ecore_iov_tlv_supported(tlv->type))
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "TLV number %d: type %s, length %d\n",
+ i, ecore_channel_tlvs_string[tlv->type],
+ tlv->length);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "TLV number %d: type %d, length %d\n",
+ i, tlv->type, tlv->length);
+
+ if (tlv->type == CHANNEL_TLV_LIST_END)
+ return;
+
+ /* Validate entry - protect against malicious VFs */
+ if (!tlv->length) {
+ DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
+ return;
+ }
+ total_length += tlv->length;
+ if (total_length >= sizeof(struct tlv_buffer_size)) {
+ DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
+ return;
+ }
+
+ i++;
+ } while (1);
+}
+
+static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ u16 length, u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct ecore_dmae_params params;
+ u8 eng_vf_id;
+
+ mbx->reply_virt->default_resp.hdr.status = status;
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ mbx->sw_mbx.response_size =
+ length + sizeof(struct channel_list_end_tlv);
+#endif
+
+ ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+ if (!p_hwfn->p_dev->sriov_info.b_hw_channel)
+ return;
+
+ eng_vf_id = p_vf->abs_vf_id;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = ECORE_DMAE_FLAG_VF_DST;
+ params.dst_vfid = eng_vf_id;
+
+ ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+ mbx->req_virt->first_tlv.reply_address +
+ sizeof(u64),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+ &params);
+
+ ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
+
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
+ enum ecore_iov_vport_update_flag flag)
+{
+ switch (flag) {
+ case ECORE_IOV_VP_UPDATE_ACTIVATE:
+ return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
+ return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ case ECORE_IOV_VP_UPDATE_TX_SWITCH:
+ return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ case ECORE_IOV_VP_UPDATE_MCAST:
+ return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ case ECORE_IOV_VP_UPDATE_RSS:
+ return CHANNEL_TLV_VPORT_UPDATE_RSS;
+ case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ case ECORE_IOV_VP_UPDATE_SGE_TPA:
+ return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ default:
+ return 0;
+ }
+}
+
+static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u8 status, u16 tlvs_mask,
+ u16 tlvs_accepted)
+{
+ struct pfvf_def_resp_tlv *resp;
+ u16 size, total_len, i;
+
+ OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+ p_mbx->offset = (u8 *)(p_mbx->reply_virt);
+ size = sizeof(struct pfvf_def_resp_tlv);
+ total_len = size;
+
+ ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+ /* Prepare response for all extended tlvs if they are found by PF */
+ for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
+ if (!(tlvs_mask & (1 << i)))
+ continue;
+
+ resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
+ ecore_iov_vport_to_tlv(p_hwfn, i), size);
+
+ if (tlvs_accepted & (1 << i))
+ resp->hdr.status = status;
+ else
+ resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - vport_update resp: TLV %d, status %02x\n",
+ p_vf->relative_vf_id,
+ ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+ total_len += size;
+ }
+
+ ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ return total_len;
+}
+
+static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf_info,
+ u16 type, u16 length, u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+ mbx->offset = (u8 *)(mbx->reply_virt);
+
+ ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ p_vf->vf_bulletin = 0;
+ p_vf->vport_instance = 0;
+ p_vf->num_mac_filters = 0;
+ p_vf->num_vlan_filters = 0;
+ p_vf->num_mc_filters = 0;
+ p_vf->configured_features = 0;
+
+ /* If VF previously requested fewer resources, go back to default */
+ p_vf->num_rxqs = p_vf->num_sbs;
+ p_vf->num_txqs = p_vf->num_sbs;
+
+ p_vf->num_active_rxqs = 0;
+
+ OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
+}
+
+static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+ struct pf_vf_resc *resc = &resp->resc;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ u16 length;
+ u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+
+ /* Validate FW compatibility */
+ if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
+ req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
+ req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
+ req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an incompatible driver [VF needs"
+ " FW %02x:%02x:%02x:%02x but Hypervisor is"
+ " using %02x:%02x:%02x:%02x]\n",
+ vf->abs_vf_id, req->vfdev_info.fw_major,
+ req->vfdev_info.fw_minor, req->vfdev_info.fw_revision,
+ req->vfdev_info.fw_engineering, FW_MAJOR_VERSION,
+ FW_MINOR_VERSION, FW_REVISION_VERSION,
+ FW_ENGINEERING_VERSION);
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+#ifndef __EXTRACT__LINUX__
+ if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+#endif
+
+ OSAL_MEMSET(resp, 0, sizeof(*resp));
+
+ /* Fill in vf info stuff : @@@TBD MichalK Hard Coded for now... */
+ vf->opaque_fid = req->vfdev_info.opaque_fid;
+ vf->num_mac_filters = 1;
+ vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ vf->num_mc_filters = ECORE_MAX_MC_ADDRS;
+
+ vf->vf_bulletin = req->bulletin_addr;
+ vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+ vf->bulletin.size : req->bulletin_size;
+
+ /* fill in pfdev info */
+ pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
+ pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
+ pfdev_info->indices_per_sb = PIS_PER_SB;
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED;
+
+ pfdev_info->stats_info.mstats.address =
+ PXP_VF_BAR0_START_MSDM_ZONE_B +
+ OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.mstats.len =
+ sizeof(struct eth_mstorm_per_queue_stat);
+
+ pfdev_info->stats_info.ustats.address =
+ PXP_VF_BAR0_START_USDM_ZONE_B +
+ OFFSETOF(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.ustats.len =
+ sizeof(struct eth_ustorm_per_queue_stat);
+
+ pfdev_info->stats_info.pstats.address =
+ PXP_VF_BAR0_START_PSDM_ZONE_B +
+ OFFSETOF(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.pstats.len =
+ sizeof(struct eth_pstorm_per_queue_stat);
+
+ pfdev_info->stats_info.tstats.address = 0;
+ pfdev_info->stats_info.tstats.len = 0;
+
+ OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
+ ETH_ALEN);
+
+ pfdev_info->fw_major = FW_MAJOR_VERSION;
+ pfdev_info->fw_minor = FW_MINOR_VERSION;
+ pfdev_info->fw_rev = FW_REVISION_VERSION;
+ pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+ pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
+ ecore_mcp_get_mfw_ver(p_hwfn->p_dev, p_ptt, &pfdev_info->mfw_ver,
+ OSAL_NULL);
+
+ pfdev_info->dev_type = p_hwfn->p_dev->type;
+ pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
+
+ /* Fill in resc : @@@TBD MichalK Hard Coded for now... */
+ resc->num_rxqs = vf->num_rxqs;
+ resc->num_txqs = vf->num_txqs;
+ resc->num_sbs = vf->num_sbs;
+ for (i = 0; i < resc->num_sbs; i++) {
+ resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
+ resc->hw_sbs[i].sb_qid = 0;
+ }
+
+ for (i = 0; i < resc->num_rxqs; i++) {
+ ecore_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&resc->hw_qid[i]);
+ resc->cid[i] = vf->vf_queues[i].fw_cid;
+ }
+
+ resc->num_mac_filters = OSAL_MIN_T(u8, vf->num_mac_filters,
+ req->resc_request.num_mac_filters);
+ resc->num_vlan_filters = OSAL_MIN_T(u8, vf->num_vlan_filters,
+ req->resc_request.num_vlan_filters);
+ resc->num_mc_filters = OSAL_MIN_T(u8, vf->num_mc_filters,
+ req->resc_request.num_mc_filters);
+
+ /* Fill agreed size of bulletin board in response, and post
+ * an initial image to the bulletin board.
+ */
+ resp->bulletin_size = vf->bulletin.size;
+ ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
+ " db_size=%d, idx_per_sb=%d, pf_cap=0x%" PRIx64 "\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
+ " n_vlans-%d, n_mcs-%d\n",
+ vf->abs_vf_id, resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.capabilities, resc->num_rxqs,
+ resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
+ resc->num_vlan_filters, resc->num_mc_filters);
+
+ vf->state = VF_ACQUIRED;
+
+ /* Prepare Response */
+ length = sizeof(struct pfvf_acquire_resp_tlv);
+
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+ length, vfpf_status);
+
+ /* @@@TBD Bulletin */
+}
+
+static enum _ecore_status_t
+__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, bool val)
+{
+ struct ecore_sp_vport_update_params params;
+ enum _ecore_status_t rc;
+
+ if (val == p_vf->spoof_chk) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk value[%d] is already configured\n", val);
+ return ECORE_SUCCESS;
+ }
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_anti_spoofing_en_flg = 1;
+ params.anti_spoofing_en = val;
+
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc == ECORE_SUCCESS) {
+ p_vf->spoof_chk = val;
+ p_vf->req_spoofchk_val = p_vf->spoof_chk;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk val[%d] configured\n", val);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+ val, p_vf->relative_vf_id);
+ }
+
+ return rc;
+}
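+
+/* A minimal sketch (not part of the imported source) of the convention the
+ * function above relies on: every vport-update attribute comes as a pair -
+ * an update_..._flg that arms the change, plus the value itself. The helper
+ * name below is hypothetical.
+ */
+#if 0 /* illustration only; never compiled */
+static enum _ecore_status_t
+example_set_tx_switching(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, bool enable)
+{
+ struct ecore_sp_vport_update_params params;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_tx_switching_flg = 1; /* arm the attribute */
+ params.tx_switching_flg = enable; /* the new value */
+
+ return ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+}
+#endif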
+
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_filter_ucast filter;
+ int i;
+
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.opcode = ECORE_FILTER_ADD;
+
+ /* Reconfigure vlans */
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used) {
+ filter.type = ECORE_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Reconfig VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = ecore_sp_eth_filter_ucast(p_hwfn,
+ p_vf->opaque_fid,
+ &filter,
+ ECORE_SPQ_MODE_CB,
+ OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to configure VLAN [%04x]"
+ " to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, u64 events)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - what about MACs? */
+
+ if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+ rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+ return rc;
+}
+
+static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u64 events)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_filter_ucast filter;
+
+ if (!p_vf->vport_instance)
+ return ECORE_INVAL;
+
+ if (events & (1 << MAC_ADDR_FORCED)) {
+ /* Since there's no way [currently] of removing the MAC,
+ * we can always assume this means we need to force it.
+ */
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.type = ECORE_FILTER_MAC;
+ filter.opcode = ECORE_FILTER_REPLACE;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
+
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure MAC for VF\n");
+ return rc;
+ }
+
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ }
+
+ if (events & (1 << VLAN_ADDR_FORCED)) {
+ struct ecore_sp_vport_update_params vport_update;
+ u8 removal;
+ int i;
+
+ OSAL_MEMSET(&filter, 0, sizeof(filter));
+ filter.type = ECORE_FILTER_VLAN;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.vlan = p_vf->bulletin.p_virt->pvid;
+ filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
+ ECORE_FILTER_FLUSH;
+
+ /* Send the ramrod */
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure VLAN for VF\n");
+ return rc;
+ }
+
+ /* Update the default-vlan & silent vlan stripping */
+ OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
+ vport_update.opaque_fid = p_vf->opaque_fid;
+ vport_update.vport_id = p_vf->vport_id;
+ vport_update.update_default_vlan_enable_flg = 1;
+ vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+ vport_update.update_default_vlan_flg = 1;
+ vport_update.default_vlan = filter.vlan;
+
+ vport_update.update_inner_vlan_removal_flg = 1;
+ removal = filter.vlan ?
+ 1 : p_vf->shadow_config.inner_vlan_removal;
+ vport_update.inner_vlan_removal_flg = removal;
+ vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update,
+ ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "PF failed to configure VF vport for vlan\n");
+ return rc;
+ }
+
+ /* Update all the Rx queues */
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ u16 qid;
+
+ if (!p_vf->vf_queues[i].rxq_active)
+ continue;
+
+ qid = p_vf->vf_queues[i].fw_rx_qid;
+
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+ 1, 0, 1,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to send Rx update"
+ " queue[0x%04x]\n",
+ qid);
+ return rc;
+ }
+ }
+
+ if (filter.vlan)
+ p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+ else
+ p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ }
+
+ /* If forced features are terminated, we need to configure the shadow
+ * configuration back again.
+ */
+ if (events)
+ ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+ return rc;
+}
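+
+/* A minimal sketch (not part of the imported source): 'events' is a bitmap
+ * keyed by the *_FORCED bulletin enum values, so a hypothetical caller
+ * forcing both a MAC and a VLAN at once would pass both bits.
+ */
+#if 0 /* illustration only; never compiled */
+static void example_force_mac_and_vlan(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u64 events = (1ULL << MAC_ADDR_FORCED) | (1ULL << VLAN_ADDR_FORCED);
+
+ (void)ecore_iov_configure_vport_forced(p_hwfn, p_vf, events);
+}
+#endif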
+
+static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start = &mbx->req_virt->start_vport;
+ struct ecore_sp_vport_start_params params = { 0 };
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct ecore_vf_info *vf_info;
+ enum _ecore_status_t rc;
+ u64 *p_bitmap;
+ int sb_id;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Failed to get VF info, invalid vfid [%d]\n",
+ vf->relative_vf_id);
+ return;
+ }
+
+ vf->state = VF_ENABLED;
+
+ /* Initialize Status block in CAU */
+ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+ if (!start->sb_addr[sb_id]) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] did not fill the address of SB %d\n",
+ vf->relative_vf_id, sb_id);
+ break;
+ }
+
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt,
+ start->sb_addr[sb_id],
+ vf->igu_sbs[sb_id],
+ vf->abs_vf_id, 1 /* VF Valid */);
+ }
+ ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+ vf->mtu = start->mtu;
+ vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+ /* Take into consideration configuration forced by hypervisor;
+ * if none is configured, use the values supplied by the VF [old
+ * VFs are still fine here, since they passed '0' as padding].
+ */
+ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+ if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ u8 vf_req = start->only_untagged;
+
+ vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+ *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+ }
+
+ params.tpa_mode = start->tpa_mode;
+ params.remove_inner_vlan = start->inner_vlan_removal;
+ params.tx_switching = true;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Don't confi VF for Tx-switching [no pVFC]\n");
+ params.tx_switching = false;
+ }
+#endif
+
+ params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+ params.drop_ttl0 = false;
+ params.concrete_fid = vf->concrete_fid;
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+
+ rc = ecore_sp_eth_vport_start(p_hwfn, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vport_instance++;
+
+ /* Force configuration if needed on the newly opened vport */
+ ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+ OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
+ vf->vport_id, vf->opaque_fid);
+ __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+
+ vf->vport_instance--;
+ vf->spoof_chk = false;
+
+ rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ /* Forget the configuration on the vport */
+ vf->configured_features = 0;
+ OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_start_rxq_tlv *req = &mbx->req_virt->start_rxq;
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+
+ rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+ vf->vf_queues[req->rx_qid].fw_cid,
+ vf->vf_queues[req->rx_qid].fw_rx_qid,
+ vf->vport_id,
+ vf->abs_vf_id + 0x10,
+ req->hw_sb,
+ req->sb_index,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr,
+ req->cqe_pbl_size);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vf_queues[req->rx_qid].rxq_active = true;
+ vf->num_active_rxqs++;
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_RXQ,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_start_txq_tlv *req = &mbx->req_virt->start_txq;
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ union ecore_qm_pq_params pq_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+
+ /* Prepare the parameters which would choose the right PQ */
+ OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+ pq_params.eth.is_vf = 1;
+ pq_params.eth.vf_id = vf->relative_vf_id;
+
+ rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
+ vf->opaque_fid,
+ vf->vf_queues[req->tx_qid].fw_tx_qid,
+ vf->vf_queues[req->tx_qid].fw_cid,
+ vf->vport_id,
+ vf->abs_vf_id + 0x10,
+ req->hw_sb,
+ req->sb_index,
+ req->pbl_addr,
+ req->pbl_size, &pq_params);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+ else
+ vf->vf_queues[req->tx_qid].txq_active = true;
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
+ length, status);
+}
+
+static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 rxq_id,
+ u8 num_rxqs,
+ bool cqe_completion)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int qid;
+
+ if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ return ECORE_INVAL;
+
+ for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+ if (vf->vf_queues[qid].rxq_active) {
+ rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_rx_qid, false,
+ cqe_completion);
+
+ if (rc)
+ return rc;
+
+ /* Only a queue that was actually active counts
+ * towards num_active_rxqs; don't underflow the
+ * counter for already-stopped queues.
+ */
+ vf->num_active_rxqs--;
+ }
+ vf->vf_queues[qid].rxq_active = false;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ u16 txq_id, u8 num_txqs)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int qid;
+
+ if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ return ECORE_INVAL;
+
+ for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+ if (vf->vf_queues[qid].txq_active) {
+ rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_tx_qid);
+
+ if (rc)
+ return rc;
+ }
+ vf->vf_queues[qid].txq_active = false;
+ }
+ return rc;
+}
+
+static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_stop_rxqs_tlv *req = &mbx->req_virt->stop_rxqs;
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+
+ /* Starting from a qid != 0 is allowed; in that case we need to
+ * make sure that qid + num_qs doesn't exceed the actual number
+ * of queues that exist.
+ */
+ rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->num_rxqs, req->cqe_completion);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_stop_txqs_tlv *req = &mbx->req_virt->stop_txqs;
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+
+ /* Starting from a qid != 0 is allowed; in that case we need to
+ * make sure that qid + num_qs doesn't exceed the actual number
+ * of queues that exist.
+ */
+ rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req = &mbx->req_virt->update_rxq;
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ u8 complete_event_flg;
+ u8 complete_cqe_flg;
+ enum _ecore_status_t rc;
+ u16 qid;
+ u8 i;
+
+ complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+ complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+ for (i = 0; i < req->num_rxqs; i++) {
+ qid = req->rx_qid + i;
+
+ if (!vf->vf_queues[qid].rxq_active) {
+ DP_NOTICE(p_hwfn, true,
+ "VF rx_qid = %d isn`t active!\n", qid);
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+ vf->vf_queues[qid].fw_rx_qid,
+ 1,
+ complete_cqe_flg,
+ complete_event_flg,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type)
+{
+ struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+ int len = 0;
+
+ do {
+ if (!p_tlv->length) {
+ DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
+ return OSAL_NULL;
+ }
+
+ if (p_tlv->type == req_type) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Extended tlv type %s, length %d found\n",
+ ecore_channel_tlvs_string[p_tlv->type],
+ p_tlv->length);
+ return p_tlv;
+ }
+
+ len += p_tlv->length;
+ p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+ if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+ DP_NOTICE(p_hwfn, true,
+ "TLVs has overrun the buffer size\n");
+ return OSAL_NULL;
+ }
+ } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+ return OSAL_NULL;
+}
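+
+/* A minimal sketch (not part of the imported source): the buffer walked
+ * above is a packed chain of {type, length} headers terminated by
+ * CHANNEL_TLV_LIST_END, so a caller simply casts whatever the search
+ * returns. The helper name below is hypothetical.
+ */
+#if 0 /* illustration only; never compiled */
+static bool example_vf_wants_rx_activation(struct ecore_hwfn *p_hwfn,
+ struct ecore_iov_vf_mbx *p_mbx)
+{
+ struct vfpf_vport_update_activate_tlv *p_act;
+
+ p_act = (struct vfpf_vport_update_activate_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE);
+
+ return p_act != OSAL_NULL && p_act->update_rx && p_act->active_rx;
+}
+#endif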
+
+static void
+ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+ p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (p_act_tlv) {
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
+ }
+}
+
+static void
+ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_vf_info *p_vf,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+ p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_vlan_tlv)
+ return;
+
+ p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+ /* Ignore the VF request if we're forcing a vlan */
+ if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ p_data->update_inner_vlan_removal_flg = 1;
+ p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+ }
+
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+ p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Ignore tx-switching configuration originating from VFs\n");
+ return;
+ }
+#endif
+
+ if (p_tx_switch_tlv) {
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
+ }
+}
+
+static void
+ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+ p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (p_mcast_tlv) {
+ p_data->update_approx_mcast_flg = 1;
+ OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
+ sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
+ }
+}
+
+static void
+ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+ p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (p_accept_tlv) {
+ p_data->accept_flags.update_rx_mode_config =
+ p_accept_tlv->update_rx_mode;
+ p_data->accept_flags.rx_accept_filter =
+ p_accept_tlv->rx_accept_filter;
+ p_data->accept_flags.update_tx_mode_config =
+ p_accept_tlv->update_tx_mode;
+ p_data->accept_flags.tx_accept_filter =
+ p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
+ }
+}
+
+static void
+ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+ p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (p_accept_any_vlan) {
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+ }
+}
+
+static void
+ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_rss_params *p_rss,
+ struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ u16 table_size;
+ u16 i, q_idx, max_q_idx;
+
+ p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (p_rss_tlv) {
+ OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
+
+ p_rss->update_rss_config =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key =
+ !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+ sizeof(p_rss->rss_ind_table));
+ OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
+ sizeof(p_rss->rss_key));
+
+ table_size = OSAL_MIN_T(u16,
+ OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
+ max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
+
+ for (i = 0; i < table_size; i++) {
+ q_idx = p_rss->rss_ind_table[i];
+ if (q_idx >= max_q_idx) {
+ DP_NOTICE(p_hwfn, true,
+ "rss_ind_table[%d] = %d, rxq is out of range\n",
+ i, q_idx);
+ /* TBD: fail the request and mark VF as malicious */
+ p_rss->rss_ind_table[i] =
+ vf->vf_queues[0].fw_rx_qid;
+ } else if (!vf->vf_queues[q_idx].rxq_active) {
+ DP_NOTICE(p_hwfn, true,
+ "rss_ind_table[%d] = %d, rxq is not active\n",
+ i, q_idx);
+ /* TBD: fail the request and mark VF as malicious */
+ p_rss->rss_ind_table[i] =
+ vf->vf_queues[0].fw_rx_qid;
+ } else {
+ p_rss->rss_ind_table[i] =
+ vf->vf_queues[q_idx].fw_rx_qid;
+ }
+ }
+
+ p_data->rss_params = p_rss;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+ } else {
+ p_data->rss_params = OSAL_NULL;
+ }
+}
+
+static void
+ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *vf,
+ struct ecore_sp_vport_update_params *p_data,
+ struct ecore_sge_tpa_params *p_sge_tpa,
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+ p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (!p_sge_tpa_tlv) {
+ p_data->sge_tpa_params = OSAL_NULL;
+ return;
+ }
+
+ OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
+
+ p_sge_tpa->update_tpa_en_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+ p_sge_tpa->update_tpa_param_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+ VFPF_UPDATE_TPA_PARAM_FLAG);
+
+ p_sge_tpa->tpa_ipv4_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+ p_sge_tpa->tpa_ipv6_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+ p_sge_tpa->tpa_pkt_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+ p_sge_tpa->tpa_hdr_data_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+ p_sge_tpa->tpa_gro_consistent_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+ p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+ p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+ p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+ p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+ p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+ p_data->sge_tpa_params = p_sge_tpa;
+
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
+}
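+
+/* A minimal sketch (not part of the imported source) of the '!!' idiom used
+ * above: it collapses a masked bit test into a strict 0/1 before the result
+ * is stored in a single-bit flag field. The helper name is hypothetical.
+ */
+#if 0 /* illustration only; never compiled */
+static u8 example_bit_to_flag(u8 sge_tpa_flags)
+{
+ /* 0x05 & 0x04 yields 0x04; '!!' turns that into exactly 1 */
+ return !!(sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+}
+#endif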
+
+static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_sp_vport_update_params params;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct ecore_sge_tpa_params sge_tpa_params;
+ struct ecore_rss_params rss_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+ u16 tlvs_mask = 0, tlvs_accepted;
+ u16 length;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.rss_params = OSAL_NULL;
+
+ /* Search for extended tlvs list and update values
+ * from VF in struct ecore_sp_vport_update_params.
+ */
+ ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+ ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+ mbx, &tlvs_mask);
+ ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ &sge_tpa_params, mbx, &tlvs_mask);
+
+ /* For now, just log a message if there is no extended tlv at all
+ * in the buffer. Once every feature of the vport-update ramrod is
+ * requested by the VF as an extended TLV, an empty buffer can be
+ * treated as an error in the response.
+ */
+ tlvs_accepted = tlvs_mask;
+
+#ifndef __EXTRACT__LINUX__
+ if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
+ &params, &tlvs_accepted) !=
+ ECORE_SUCCESS) {
+ tlvs_accepted = 0;
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+#endif
+
+ if (!tlvs_accepted) {
+ if (tlvs_mask)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Upper-layer prevents said VF configuration\n");
+ else
+ DP_NOTICE(p_hwfn, true,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_accepted);
+ ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ int i;
+
+ /* TODO - do we need a MAC shadow registry? */
+ if (p_params->type == ECORE_FILTER_MAC)
+ return ECORE_SUCCESS;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == ECORE_FILTER_REMOVE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing vlan\n",
+ p_vf->relative_vf_id);
+ return ECORE_INVAL;
+ }
+ } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+ p_params->opcode == ECORE_FILTER_FLUSH) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ return ECORE_SUCCESS;
+
+ if (p_params->opcode == ECORE_FILTER_ADD ||
+ p_params->opcode == ECORE_FILTER_REPLACE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (!p_vf->shadow_config.vlans[i].used) {
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid =
+ p_params->vlan;
+ break;
+ }
+ if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d vlan filters\n",
+ p_vf->relative_vf_id,
+ ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return ECORE_INVAL;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req = &mbx->req_virt->ucast_filter;
+ struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct ecore_filter_ucast params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc;
+
+ /* Prepare the unicast filter params */
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
+ params.opcode = (enum ecore_filter_opcode)req->opcode;
+ params.type = (enum ecore_filter_ucast_type)req->type;
+
+ /* @@@TBD - We might need logic on HV side in determining this */
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac[0], params.mac[1], params.mac[2],
+ params.mac[3], params.mac[4], params.mac[5], params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration */
+ if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
+ ECORE_SUCCESS) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Determine if the unicast filtering is acceptable to the PF */
+ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ (params.type == ECORE_FILTER_VLAN ||
+ params.type == ECORE_FILTER_MAC_VLAN)) {
+ /* Once a VLAN is forced or a PVID is set, do not allow
+ * adding or replacing any further VLANs.
+ */
+ if (params.opcode == ECORE_FILTER_ADD ||
+ params.opcode == ECORE_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ (params.type == ECORE_FILTER_MAC ||
+ params.type == ECORE_FILTER_MAC_VLAN)) {
+ if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
+ (params.opcode != ECORE_FILTER_ADD &&
+ params.opcode != ECORE_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
+ if (rc == ECORE_EXISTS) {
+ goto out;
+ } else if (rc == ECORE_INVAL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ ECORE_SPQ_MODE_CB, OSAL_NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);
+
+ /* Reset Permission table */
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+
+ ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, PFVF_STATUS_SUCCESS);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+ int cnt;
+ u32 val;
+
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
+
+ for (cnt = 0; cnt < 50; cnt++) {
+ val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+ if (!val)
+ break;
+ OSAL_MSLEEP(20);
+ }
+ ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn,
+ "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+ p_vf->abs_vf_id, val);
+ return ECORE_TIMEOUT;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+ u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ int i, cnt;
+
+ /* Read initial consumers & producers */
+ for (i = 0; i < MAX_NUM_VOQS; i++) {
+ u32 prod;
+
+ cons[i] = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ prod = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+ i * 0x40);
+ distance[i] = prod - cons[i];
+ }
+
+ /* Wait for consumers to pass the producers */
+ i = 0;
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; i < MAX_NUM_VOQS; i++) {
+ u32 tmp;
+
+ tmp = ecore_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ if (distance[i] > tmp - cons[i])
+ break;
+ }
+
+ if (i == MAX_NUM_VOQS)
+ break;
+
+ OSAL_MSLEEP(20);
+ }
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+ p_vf->abs_vf_id, i);
+ return ECORE_TIMEOUT;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_prs(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+ u16 tc_cons[NUM_OF_TCS], tc_lb_cons[NUM_OF_TCS];
+ u16 prod[NUM_OF_TCS];
+ int i, cnt;
+
+ /* Read initial consumers & producers */
+ for (i = 0; i < NUM_OF_TCS; i++) {
+ tc_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
+ tc_lb_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_MSG_CT_LB_0 + i * 0x4);
+ prod[i] = (u16)ecore_rd(p_hwfn, p_ptt,
+ BRB_REG_PER_TC_COUNTERS +
+ p_hwfn->port_id * 0x20 + i * 0x4);
+ }
+
+ /* Wait for consumers to pass the producers */
+ i = 0;
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; i < NUM_OF_TCS; i++) {
+ u16 cons;
+
+ cons = (u16)ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
+ if (prod[i] - tc_cons[i] > cons - tc_cons[i])
+ break;
+
+ cons = (u16)ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_MSG_CT_LB_0 + i * 0x4);
+ if (prod[i] - tc_lb_cons[i] > cons - tc_lb_cons[i])
+ break;
+ }
+
+ if (i == NUM_OF_TCS)
+ break;
+
+ /* 16-bit counters; Delay instead of sleep... */
+ OSAL_UDELAY(10);
+ }
+
+ /* This polling is only best-effort for BB, since the registers are
+ * only 16-bit wide and the guarantee is not good enough. Don't fail
+ * things if polling didn't return the expected results.
+ */
+ if (cnt == 50)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - prs polling failed on TC %d\n",
+ p_vf->abs_vf_id, i);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_ptt *p_ptt)
+{
+ enum _ecore_status_t rc;
+
+ /* TODO - add SRC and TM polling once we add storage IOV */
+
+ rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = ecore_iov_vf_flr_poll_prs(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ return ECORE_SUCCESS;
+}
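+
+/* A minimal sketch (not part of the imported source) of the wrap-safe
+ * counter arithmetic the PBF/PRS polls above depend on: progress is
+ * measured as (new - old) in unsigned math, which stays correct across a
+ * counter wrap. The helper name is hypothetical.
+ */
+#if 0 /* illustration only; never compiled */
+static bool example_consumer_passed(u32 old_cons, u32 distance, u32 new_cons)
+{
+ /* e.g. old_cons = 0xfffffff0 and prod = 0x10 give distance = 0x20;
+ * new_cons = 0x12 then yields 0x22 >= 0x20 despite the wrap.
+ */
+ return (new_cons - old_cons) >= distance;
+}
+#endif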
+
+static enum _ecore_status_t
+ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id, u32 *ack_vfs)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!p_vf)
+ return ECORE_SUCCESS;
+
+ if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64))) {
+ u16 vfid = p_vf->abs_vf_id;
+
+ /* TODO - should we lock channel? */
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Handling FLR\n", vfid);
+
+ ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+ /* If VF isn't active, no need for anything but SW */
+ if (!ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, p_vf->relative_vf_id))
+ goto cleanup;
+
+ /* TODO - what to do in case of failure? */
+ rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ goto cleanup;
+
+ rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
+ if (rc) {
+ /* TODO - how should this failure be handled? */
+ DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
+ return rc;
+ }
+
+ /* VF_STOPPED has to be set only after final cleanup
+ * but prior to re-enabling the VF.
+ */
+ p_vf->state = VF_STOPPED;
+
+ rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+ if (rc) {
+ /* TODO - same open question as above */
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
+ vfid);
+ return rc;
+ }
+cleanup:
+ /* Mark VF for ack and clean pending state */
+ if (p_vf->state == VF_RESET)
+ p_vf->state = VF_STOPPED;
+ ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 i;
+
+ OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
+ ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+ rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 rel_vf_id)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
+
+ rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+ u16 i, found = 0;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "[%08x,...,%08x]: %08x\n",
+ i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+ /* Mark VFs */
+ for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) {
+ struct ecore_vf_info *p_vf;
+ u8 vfid;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
+ if (!p_vf)
+ continue;
+
+ vfid = p_vf->abs_vf_id;
+ if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+ u16 rel_vf_id = p_vf->relative_vf_id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] [rel %d] got FLR-ed\n",
+ vfid, rel_vf_id);
+
+ p_vf->state = VF_RESET;
+
+ /* No need to lock here, since pending_flr should
+ * only change here and before ACKing the MFW. Since
+ * the MFW will not trigger an additional attention
+ * for VF FLR until we ACK, we're safe.
+ */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+ found = 1;
+ }
+ }
+
+ return found;
+}
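+
+/* A minimal sketch (not part of the imported source) of the pending_flr
+ * indexing used above: one bit per relative VF id spread across an array
+ * of u64 words. The helper name is hypothetical.
+ */
+#if 0 /* illustration only; never compiled */
+static void example_mark_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+
+ /* e.g. VF 70 lands in word 70 / 64 = 1, bit 70 % 64 = 6 */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+}
+#endif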
+
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *p_params,
+ struct ecore_mcp_link_state *p_link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ if (p_params)
+ __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ if (p_link)
+ __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ if (p_caps)
+ __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, int vfid)
+{
+ struct ecore_iov_vf_mbx *mbx;
+ struct ecore_vf_info *p_vf;
+ int i;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf)
+ return;
+
+ mbx = &p_vf->vf_mbx;
+
+ /* ecore_iov_process_mbx_request */
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "ecore_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+
+ mbx->first_tlv = mbx->req_virt->first_tlv;
+
+ /* check if tlv type is known */
+ if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ ecore_iov_lock_vf_pf_channel(p_hwfn,
+ p_vf, mbx->first_tlv.tl.type);
+
+ /* switch on the opcode */
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_START:
+ ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_TEARDOWN:
+ ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_RXQ:
+ ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_TXQ:
+ ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_RXQS:
+ ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_TXQS:
+ ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_RXQ:
+ ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_UPDATE:
+ ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UCAST_FILTER:
+ ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_INT_CLEANUP:
+ ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+ break;
+ }
+
+ ecore_iov_unlock_vf_pf_channel(p_hwfn,
+ p_vf, mbx->first_tlv.tl.type);
+
+ } else {
+ /* Unknown TLV - this may belong to a VF driver from the
+ * future - a version written after this PF driver, which
+ * supports features we don't know of yet and therefore
+ * cannot serve. Alternatively, a buggy VF driver may be
+ * sending garbage over the channel.
+ */
+ DP_ERR(p_hwfn,
+ "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+
+ for (i = 0; i < 20; i++) {
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "%x ",
+ mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+ }
+
+ /* test whether we can respond to the VF (do we have an address
+ * for it?)
+ */
+ if (p_vf->state == VF_ACQUIRED)
+ DP_ERR(p_hwfn, "UNKNOWN TLV Not supported yet\n");
+ }
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
+ mbx->sw_mbx.response_offset = 0;
+#endif
+}
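+
+/* A minimal sketch (not part of the imported source; the real call site
+ * lives outside this file) of the expected request flow: DMA the message
+ * out of VF memory first, then dispatch it to the handler above. The
+ * helper name is hypothetical.
+ */
+#if 0 /* illustration only; never compiled */
+static void example_handle_vf_request(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, int vfid)
+{
+ if (ecore_iov_copy_vf_msg(p_hwfn, p_ptt, vfid) == ECORE_SUCCESS)
+ ecore_iov_process_mbx_req(p_hwfn, p_ptt, vfid);
+}
+#endif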
+
+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
+ __le16 vfid,
+ struct regpair *vf_msg)
+{
+ struct ecore_vf_info *p_vf;
+ u8 min, max;
+
+ if (!p_hwfn->pf_iov_info || !p_hwfn->pf_iov_info->vfs_array) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Got a message from VF while PF is not initialized for IOV support\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* Find the VF record - message comes with relative [engine] vfid */
+ min = (u8)p_hwfn->hw_info.first_vf_in_pf;
+ max = min + p_hwfn->p_dev->sriov_info.total_vfs;
+ /* @@@TBD - for BE machines, should echo field be reversed? */
+ if ((u8)vfid < min || (u8)vfid >= max) {
+ DP_INFO(p_hwfn,
+ "Got a message from VF with relative id 0x%08x, but PF's range is [0x%02x,...,0x%02x)\n",
+ (u8)vfid, min, max);
+ return ECORE_INVAL;
+ }
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)vfid - min];
+
+ /* Record the physical address of the request so that the handler
+ * can later copy the message from it.
+ */
+ p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+ return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
+}
+
+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ return ecore_sriov_vfpf_msg(p_hwfn, echo,
+ &data->vf_pf_channel.msg_addr);
+ case COMMON_EVENT_VF_FLR:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF-FLR is still not supported\n");
+ return ECORE_SUCCESS;
+ default:
+ DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
+ opcode);
+ return ECORE_INVAL;
+ }
+}
+
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64)));
+}
+
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
+ bool b_enabled_only)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+ return false;
+ }
+
+ return b_enabled_only ? ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id) :
+ (rel_vf_id < p_hwfn->p_dev->sriov_info.total_vfs);
+}
+
+struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
+ *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return OSAL_NULL;
+
+ return &vf->p_vf_info;
+}
+
+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+ u64 add_bit = 1ULL << (vfid % 64);
+
+ /* TODO - add locking mechanisms [no atomics in ecore, so we can't
+ * add the lock inside the ecore_pf_iov struct].
+ */
+ p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events)
+{
+ u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+ /* TODO - Take a lock */
+ OSAL_MEMCPY(events, p_pending_events,
+ sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+ OSAL_MEMSET(p_pending_events, 0, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+}
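+
+/* A minimal sketch (not part of the imported source) of how a consumer
+ * might drain the snapshot returned above, mapping each set bit back to a
+ * relative VF id. The helper name is hypothetical.
+ */
+#if 0 /* illustration only; never compiled */
+static void example_drain_pending_events(struct ecore_hwfn *p_hwfn)
+{
+ u64 events[ECORE_VF_ARRAY_LENGTH];
+ u16 vfid;
+
+ ecore_iov_pf_get_and_clear_pending_events(p_hwfn, events);
+ for (vfid = 0; vfid < ECORE_VF_ARRAY_LENGTH * 64; vfid++)
+ if (events[vfid / 64] & (1ULL << (vfid % 64)))
+ ; /* handle the mailbox of 'vfid' here */
+}
+#endif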
+
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *ptt, int vfid)
+{
+ struct ecore_dmae_params params;
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return ECORE_INVAL;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+ params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
+ params.src_vfid = vf_info->abs_vf_id;
+
+ if (ecore_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return ECORE_IO;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << MAC_ADDR_FORCED;
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set MAC, invalid vfid [%d]\n", vfid);
+ return ECORE_INVAL;
+ }
+
+ if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set MAC, Forced MAC is configured\n");
+ return ECORE_INVAL;
+ }
+
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << VLAN_ADDR_FORCED;
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+ bool b_untagged_only, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Since this is configurable only during vport-start, don't take it
+ * if we're past that point.
+ */
+ if (vf_info->state == VF_ENABLED) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can't support untagged change for vfid[%d] - VF is already active\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Set configuration; This will later be taken into account during the
+ * VF initialization.
+ */
+ feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
+ (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
+ : 0;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 *opaque_fid)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ *opaque_fid = vf_info->opaque_fid;
+}
+
+void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
+ u8 *p_vort_id)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ *p_vort_id = vf_info->vport_id;
+}
+
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *p_vf_info;
+
+ p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return !!p_vf_info->vport_instance;
+}
+
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *p_vf_info;
+
+ p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return p_vf_info->state == VF_STOPPED;
+}
+
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return false;
+
+ return vf_info->spoof_chk;
+}
+
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn) ||
+ !ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, vfid))
+ return false;
+ else
+ return true;
+}
+
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+ int vfid, bool val)
+{
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_vf_info *vf;
+
+ if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn, true,
+ "SR-IOV sanity check failed, can't set spoofchk\n");
+ goto out;
+ }
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ goto out;
+
+ if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+ /* After VF VPORT start PF will configure spoof check */
+ vf->req_spoofchk_val = val;
+ rc = ECORE_SUCCESS;
+ goto out;
+ }
+
+ rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+ return rc;
+}
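+
+/* A minimal sketch (not part of the imported source) of a hypervisor-side
+ * caller: before vport-start the request is only latched in
+ * req_spoofchk_val and applied later by ecore_iov_vf_mbx_start_vport().
+ * The helper name is hypothetical.
+ */
+#if 0 /* illustration only; never compiled */
+static void example_enable_spoofchk(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ if (ecore_iov_spoofchk_set(p_hwfn, vfid, true) != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, true,
+ "spoofchk request for VF %d failed\n", vfid);
+}
+#endif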
+
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
+{
+ u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
+
+ max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
+ : ECORE_MAX_VF_CHAINS_PER_PF;
+
+ return max_chains_per_vf;
+}
+
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_req_virt_addr,
+ u16 *p_req_virt_size)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return;
+
+ if (pp_req_virt_addr)
+ *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
+
+ if (p_req_virt_size)
+ *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
+}
+
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id,
+ void **pp_reply_virt_addr,
+ u16 *p_reply_virt_size)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return;
+
+ if (pp_reply_virt_addr)
+ *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
+
+ if (p_reply_virt_size)
+ *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
+}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *vf_info =
+ ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+ if (!vf_info)
+ return OSAL_NULL;
+
+ return &vf_info->vf_mbx.sw_mbx;
+}
+#endif
+
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
+{
+ return (length >= sizeof(struct vfpf_first_tlv) &&
+ (length <= sizeof(union vfpf_tlvs)));
+}
+
+u32 ecore_iov_pfvf_msg_length(void)
+{
+ return sizeof(union pfvf_tlvs);
+}
+
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return OSAL_NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return OSAL_NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return 0;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return 0;
+
+ return p_vf->bulletin.p_virt->pvid;
+}
+
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid, int val)
+{
+ struct ecore_vf_info *vf;
+ enum _ecore_status_t rc;
+ u8 abs_vp_id = 0;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+
+ if (!vf)
+ return ECORE_INVAL;
+
+ rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+ int vfid, u32 rate)
+{
+ struct ecore_vf_info *vf;
+ enum _ecore_status_t rc;
+ u8 vport_id;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn, true,
+ "SR-IOV sanity check failed, can't set min rate\n");
+ return ECORE_INVAL;
+ }
+ }
+
+ vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
+ if (!vf)
+ return ECORE_INVAL;
+
+ vport_id = vf->vport_id;
+
+ rc = ecore_configure_vport_wfq(p_dev, vport_id, rate);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int vfid,
+ struct ecore_eth_stats *p_stats)
+{
+ struct ecore_vf_info *vf;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ return ECORE_INVAL;
+
+ if (vf->state != VF_ENABLED)
+ return ECORE_INVAL;
+
+ __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
+ vf->abs_vf_id + 0x10, false);
+
+ return ECORE_SUCCESS;
+}
+
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_rxqs;
+}
+
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_active_rxqs;
+}
+
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return OSAL_NULL;
+
+ return p_vf->ctx;
+}
+
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return 0;
+
+ return p_vf->num_sbs;
+}
+
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_FREE);
+}
+
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_ACQUIRED);
+}
+
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state == VF_ENABLED);
+}
+
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ struct ecore_wfq_data *vf_vp_wfq;
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return 0;
+
+ vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+ if (vf_vp_wfq->configured)
+ return vf_vp_wfq->min_speed;
+ else
+ return 0;
+}
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
new file mode 100644
index 00000000..3471e5c6
--- /dev/null
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SRIOV_H__
+#define __ECORE_SRIOV_H__
+
+#include "ecore_status.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_iov_api.h"
+#include "ecore_hsi_common.h"
+
+#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
+
+#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
+ (MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+
+/* Represents a full message: both the request filled by the VF
+ * and the response filled by the PF. The VF needs one copy of
+ * this message - it fills the request part and sends it to the
+ * PF, and the PF copies its response into the response part for
+ * the VF to read later. The PF needs to hold one message like
+ * this per VF; the request copied from the VF is placed in the
+ * request part, and the response is filled by the PF before
+ * sending it back to the VF.
+ */
+struct ecore_vf_mbx_msg {
+ union vfpf_tlvs req;
+ union pfvf_tlvs resp;
+};
+
+/* This data is held in the ecore_hwfn structure for VFs only. */
+struct ecore_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ osal_mutex_t mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct ecore_bulletin bulletin;
+ struct ecore_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+};
+
+/* This mailbox is maintained per VF in its PF and
+ * contains all information required for sending / receiving
+ * a message.
+ */
+struct ecore_iov_vf_mbx {
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ u8 *offset;
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ struct ecore_iov_sw_mbx sw_mbx;
+#endif
+
+ /* VF GPA address */
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
+
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
+
+ u8 flags;
+#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
+ * more than one pending msg
+ */
+};
+
+struct ecore_vf_q_info {
+ u16 fw_rx_qid;
+ u16 fw_tx_qid;
+ u8 fw_cid;
+ u8 rxq_active;
+ u8 txq_active;
+};
+
+enum int_mod {
+ VPORT_INT_MOD_UNDEFINED = 0,
+ VPORT_INT_MOD_ADAPTIVE = 1,
+ VPORT_INT_MOD_OFF = 2,
+ VPORT_INT_MOD_LOW = 100,
+ VPORT_INT_MOD_MEDIUM = 200,
+ VPORT_INT_MOD_HIGH = 300
+};
+
+enum vf_state {
+ VF_FREE = 0, /* VF ready to be acquired; holds no resources */
+ VF_ACQUIRED = 1, /* VF acquired, but not initialized */
+ VF_ENABLED = 2, /* VF enabled */
+ VF_RESET = 3, /* VF FLR'd, pending cleanup */
+ VF_STOPPED = 4 /* VF stopped */
+};
+
+struct ecore_vf_vlan_shadow {
+ bool used;
+ u16 vid;
+};
+
+struct ecore_vf_shadow_config {
+ /* Shadow copy of all guest vlans */
+ struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+ u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct ecore_vf_info {
+ struct ecore_iov_vf_mbx vf_mbx;
+ enum vf_state state;
+ u8 to_disable;
+
+ struct ecore_bulletin bulletin;
+ dma_addr_t vf_bulletin;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
+#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
+ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+ (p_vf)->abs_vf_id)
+
+ u8 vport_instance; /* Number of active vports */
+ u8 num_rxqs;
+ u8 num_txqs;
+
+ u8 num_sbs;
+
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+
+ struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
+
+ /* TODO - only Windows is using it - should be removed */
+ u8 was_malicious;
+ u8 num_active_rxqs;
+ void *ctx;
+ struct ecore_public_vf_info p_vf_info;
+ bool spoof_chk; /* Currently configured on HW */
+ bool req_spoofchk_val; /* Requested value */
+
+ /* Stores the configuration requested by VF */
+ struct ecore_vf_shadow_config shadow_config;
+
+ /* A bitfield using bulletin's valid-map bits, used to indicate
+ * which of the bulletin board features have been configured.
+ */
+ u64 configured_features;
+#define ECORE_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
+ (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of ecore_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct ecore_pf_iov {
+ struct ecore_vf_info vfs_array[MAX_NUM_VFS];
+ u64 pending_events[ECORE_VF_ARRAY_LENGTH];
+ u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
+ u16 base_vport_id;
+
+ /* Allocate message address continuously and split to each VF */
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
+};
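The three mailbox regions above are each allocated as one continuous DMA block and then carved into per-VF slices. A minimal sketch of how such a split could look, assuming the buffers were sized as MAX_NUM_VFS times the respective union (the helper name is illustrative, not part of this patch):

/* Illustrative sketch: slice the continuous mailbox regions into
 * per-VF request/reply buffers; the helper name is hypothetical.
 */
static void example_split_vf_mailboxes(struct ecore_pf_iov *p_iov_info)
{
	u16 vf_id;

	for (vf_id = 0; vf_id < MAX_NUM_VFS; vf_id++) {
		struct ecore_iov_vf_mbx *p_mbx =
		    &p_iov_info->vfs_array[vf_id].vf_mbx;
		u32 req_off = vf_id * sizeof(union vfpf_tlvs);
		u32 rep_off = vf_id * sizeof(union pfvf_tlvs);

		p_mbx->req_virt = (union vfpf_tlvs *)
		    ((u8 *)p_iov_info->mbx_msg_virt_addr + req_off);
		p_mbx->req_phys = p_iov_info->mbx_msg_phys_addr + req_off;
		p_mbx->reply_virt = (union pfvf_tlvs *)
		    ((u8 *)p_iov_info->mbx_reply_virt_addr + rep_off);
		p_mbx->reply_phys = p_iov_info->mbx_reply_phys_addr + rep_off;
	}
}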
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief Read sriov related information and allocate resources -
+ * reads from configuration space, shmem, and allocates the
+ * VF database in the PF.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param p_iov
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
+ u8 **offset, u16 type, u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * @brief ecore_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data);
+
+/**
+ * @brief calculate CRC for bulletin board validation
+ *
+ * @param crc - basic crc seed
+ * @param ptr - pointer to beginning of buffer
+ * @param length - length of buffer in bytes
+ *
+ * @return calculated crc over buffer [with respect to seed].
+ */
+u32 ecore_crc32(u32 crc, u8 *ptr, u32 length);
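The VF-side reader (ecore_vf_read_bulletin() in ecore_vf.c below) validates a bulletin by recomputing this CRC over everything past the crc field, with seed 0. A minimal sketch of how a producer could seal a bulletin before posting it (the helper name is illustrative):

/* Illustrative: bump the version and stamp the CRC over the
 * bulletin contents, skipping the leading crc field itself.
 */
static void example_seal_bulletin(struct ecore_bulletin_content *p_bulletin,
				  u32 size)
{
	u32 crc_size = sizeof(p_bulletin->crc);

	p_bulletin->version++;
	p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
				      size - crc_size);
}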
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ */
+int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+
+/**
+ * @brief ecore_iov_get_vf_info - return the database of a
+ * specific VF
+ *
+ * @param p_hwfn
+ * @param relative_vf_id - relative id of the VF for which info
+ * is requested
+ * @param b_enabled_only - false if the caller wants access
+ * even when the VF is disabled
+ *
+ * @return struct ecore_vf_info*
+ */
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only);
+#else
+static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn
+ *p_hwfn,
+ struct ecore_ptt
+ *p_ptt)
+{
+ return ECORE_SUCCESS;
+}
+
+static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset,
+ u16 type, u16 length)
+{
+ return OSAL_NULL;
+}
+
+static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
+ void *tlvs_list)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn
+ *p_hwfn)
+{
+ return ECORE_SUCCESS;
+}
+
+static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+}
+
+static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn
+ *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union
+ event_ring_data
+ * data)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
+{
+ return 0;
+}
+
+static OSAL_INLINE int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
+ u32 *disabled_vfs)
+{
+ return 0;
+}
+
+static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+ void *p_tlvs_list,
+ u16 req_type)
+{
+ return OSAL_NULL;
+}
+
+static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn
+ *p_hwfn,
+ u16
+ relative_vf_id,
+ bool
+ b_enabled_only)
+{
+ return OSAL_NULL;
+}
+
+#endif
+#endif /* __ECORE_SRIOV_H__ */
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
new file mode 100644
index 00000000..98d40bb5
--- /dev/null
+++ b/drivers/net/qede/base/ecore_status.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_STATUS_H__
+#define __ECORE_STATUS_H__
+
+enum _ecore_status_t {
+ ECORE_UNKNOWN_ERROR = -12,
+ ECORE_NORESOURCES = -11,
+ ECORE_NODEV = -10,
+ ECORE_ABORTED = -9,
+ ECORE_AGAIN = -8,
+ ECORE_NOTIMPL = -7,
+ ECORE_EXISTS = -6,
+ ECORE_IO = -5,
+ ECORE_TIMEOUT = -4,
+ ECORE_INVAL = -3,
+ ECORE_BUSY = -2,
+ ECORE_NOMEM = -1,
+ ECORE_SUCCESS = 0,
+ /* PENDING is not an error and should be positive */
+ ECORE_PENDING = 1,
+};
+
+#endif /* __ECORE_STATUS_H__ */
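The convention here is that failures are negative, ECORE_SUCCESS is zero, and ECORE_PENDING is the only positive value, so callers can branch on sign. A tiny sketch of that convention (the helper names are illustrative):

/* Illustrative: sign-based classification of ecore status codes. */
static bool example_status_is_error(enum _ecore_status_t rc)
{
	return rc < ECORE_SUCCESS;
}

static bool example_status_is_pending(enum _ecore_status_t rc)
{
	return rc == ECORE_PENDING;
}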
diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h
new file mode 100644
index 00000000..616b44c2
--- /dev/null
+++ b/drivers/net/qede/base/ecore_utils.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_UTILS_H__
+#define __ECORE_UTILS_H__
+
+/* dma_addr_t manip */
+#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff))
+#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32))
+
+#define DMA_LO_LE(x) OSAL_CPU_TO_LE32(DMA_LO(x))
+#define DMA_HI_LE(x) OSAL_CPU_TO_LE32(DMA_HI(x))
+
+/* It's assumed that whoever includes this has previously included an hsi
+ * file defining the regpair.
+ */
+#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \
+ (x).lo = DMA_LO_LE((val))
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo) HILO_GEN(hi, lo, u64)
+#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
+
+#endif
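A short sketch of the round trip these macros provide: DMA_HI()/DMA_LO() split a 64-bit DMA address into 32-bit halves and HILO_DMA() reassembles them (the function name is illustrative):

/* Illustrative: split a DMA address into 32-bit halves and
 * reassemble it; the reassembled value equals the input.
 */
static dma_addr_t example_dma_roundtrip(dma_addr_t addr)
{
	u32 hi = DMA_HI(addr);
	u32 lo = DMA_LO(addr);

	return HILO_DMA(hi, lo);
}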
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
new file mode 100644
index 00000000..d32fb35a
--- /dev/null
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -0,0 +1,1332 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_sriov.h"
+#include "ecore_l2_api.h"
+#include "ecore_vf.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_status.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_l2.h"
+#include "ecore_mcp_api.h"
+#include "ecore_vf_api.h"
+
+static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ void *p_tlv;
+
+ /* This lock is released when we receive PF's response
+ * in ecore_send_msg2pf().
+ * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
+ * must come in sequence.
+ */
+ OSAL_MUTEX_ACQUIRE(&p_iov->mutex);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "preparing to send %s tlv over vf pf channel\n",
+ ecore_channel_tlvs_string[type]);
+
+ /* Reset Request offset */
+ p_iov->offset = (u8 *)(p_iov->vf2pf_request);
+
+ /* Clear mailbox - both request and reply */
+ OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+ OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* Init type and length */
+ p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+ /* Init first tlv header */
+ ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+ (u64)p_iov->pf2vf_reply_phys;
+
+ return p_tlv;
+}
+
+static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
+ u8 *done, u32 resp_size)
+{
+ struct ustorm_vf_zone *zone_data = (struct ustorm_vf_zone *)
+ ((u8 *)PXP_VF_BAR0_START_USDM_ZONE_B);
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ int rc = ECORE_SUCCESS, time = 100;
+ u8 pf_id;
+
+ /* output tlvs list */
+ ecore_dp_tlv_list(p_hwfn, p_req);
+
+ /* need to add the END TLV to the message size */
+ resp_size += sizeof(struct channel_list_end_tlv);
+
+ if (!p_hwfn->p_dev->sriov_info.b_hw_channel) {
+ rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
+ done,
+ p_req,
+ p_hwfn->vf_iov_info->pf2vf_reply,
+ sizeof(union vfpf_tlvs), resp_size);
+ /* TODO - no prints about message ? */
+ goto exit;
+ }
+
+ /* Send TLVs over HW channel */
+ OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+ trigger.vf_pf_msg_valid = 1;
+ /* TODO - FW should remove this requirement */
+ pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PFID);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
+ pf_id,
+ U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ &zone_data->non_trigger.vf_pf_msg_addr,
+ *((u32 *)&trigger), &zone_data->trigger);
+
+ REG_WR(p_hwfn,
+ (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+ U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ REG_WR(p_hwfn,
+ (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+ U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ /* The message data must be written first, to prevent trigger before
+ * data is written.
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
+ *((u32 *)&trigger));
+
+ while ((!*done) && time) {
+ OSAL_MSLEEP(25);
+ time--;
+ }
+
+ if (!*done) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
+ rc = ECORE_TIMEOUT;
+ goto exit;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+exit:
+ OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
+
+ return rc;
+}
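Taken together, ecore_vf_pf_prep() and ecore_send_msg2pf() give every request in this file the same shape: prep the first TLV (which takes the mailbox mutex), append the list-end TLV, send, then check both the transport rc and the PF status. A condensed sketch of that pattern for a request carrying no extra payload (the function name is illustrative; the real requests follow below):

/* Illustrative: the canonical VF -> PF request/response pattern. */
static enum _ecore_status_t example_simple_request(struct ecore_hwfn *p_hwfn,
						   u16 tlv_type)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* Clear mailbox, take the mutex and place the first tlv */
	ecore_vf_pf_prep(p_hwfn, tlv_type, sizeof(struct vfpf_first_tlv));

	/* Terminate the tlv list */
	ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Send; this also releases the mailbox mutex */
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		return rc;

	return (resp->hdr.status == PFVF_STATUS_SUCCESS) ? ECORE_SUCCESS
							 : ECORE_INVAL;
}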
+
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+#define VF_ACQUIRE_MC_FILTERS 10
+
+static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct ecore_vf_acquire_sw_info vf_sw_info;
+ struct vfpf_acquire_tlv *req;
+ int rc = 0, attempts = 0;
+ bool resources_acquired = false;
+
+ /* @@@ TBD: MichalK take this from somewhere else... */
+ u8 rx_count = 1, tx_count = 1, num_sbs = 1;
+ u8 num_mac = VF_ACQUIRE_MAC_FILTERS, num_mc = VF_ACQUIRE_MC_FILTERS;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ /* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
+ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = num_sbs;
+ req->resc_request.num_mac_filters = num_mac;
+ req->resc_request.num_mc_filters = num_mc;
+ req->resc_request.num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+
+ OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
+ OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
+
+ req->vfdev_info.os_type = vf_sw_info.os_type;
+ req->vfdev_info.driver_version = vf_sw_info.driver_version;
+ req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+ req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+ req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+ req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+
+ if (vf_sw_info.override_fw_version)
+ req->vfdev_info.capabilties |= VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = p_iov->bulletin.phys;
+ req->bulletin_size = p_iov->bulletin.size;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ while (!resources_acquired) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = ecore_send_msg2pf(p_hwfn,
+ &resp->hdr.status, sizeof(*resp));
+
+ /* PF timeout */
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to p_hwfn */
+ OSAL_MEMCPY(&p_iov->acquire_resp,
+ resp, sizeof(p_iov->acquire_resp));
+
+ attempts++;
+
+ /* PF agrees to allocate our resources */
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "resources acquired\n");
+ resources_acquired = true;
+ } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ /* PF refuses to allocate our resources */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs = resp->resc.num_txqs;
+ req->resc_request.num_rxqs = resp->resc.num_rxqs;
+ req->resc_request.num_sbs = resp->resc.num_sbs;
+ req->resc_request.num_mac_filters =
+ resp->resc.num_mac_filters;
+ req->resc_request.num_vlan_filters =
+ resp->resc.num_vlan_filters;
+ req->resc_request.num_mc_filters =
+ resp->resc.num_mc_filters;
+
+ /* Clear response buffer */
+ OSAL_MEMSET(p_iov->pf2vf_reply, 0,
+ sizeof(union pfvf_tlvs));
+ } else {
+ DP_ERR(p_hwfn,
+ "PF returned error %d to VF acquisition request\n",
+ resp->hdr.status);
+ return ECORE_AGAIN;
+ }
+ }
+
+ rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "VF_UPDATE_ACQUIRE_RESC_RESP Failed: status = 0x%x.\n",
+ rc);
+ return ECORE_AGAIN;
+ }
+
+ /* Update bulletin board size with response from PF */
+ p_iov->bulletin.size = resp->bulletin_size;
+
+ /* get HW info */
+ p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
+ p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;
+
+ DP_INFO(p_hwfn, "Chip details - %s%d\n",
+ ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
+ CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
+
+ /* @@@TBD MichalK: Fw ver... */
+ /* strlcpy(p_hwfn->fw_ver, p_hwfn->acquire_resp.pfdev_info.fw_ver,
+ * sizeof(p_hwfn->fw_ver));
+ */
+
+ p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
+
+ return 0;
+}
+
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev)
+{
+ enum _ecore_status_t rc = ECORE_NOMEM;
+ struct ecore_vf_iov *p_sriov;
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0]; /* @@@TBD CMT */
+
+ p_dev->num_hwfns = 1; /* @@@TBD CMT must be fixed... */
+
+ p_hwfn->regview = p_dev->regview;
+ if (p_hwfn->regview == OSAL_NULL) {
+ DP_ERR(p_hwfn,
+ "regview should be initialized before"
+ " ecore_vf_hw_prepare is called\n");
+ return ECORE_INVAL;
+ }
+
+ /* Set the doorbell bar. Assumption: regview is set */
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_VF_BAR0_ME_OPAQUE_ADDRESS);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn,
+ PXP_VF_BAR0_ME_CONCRETE_ADDRESS);
+
+ /* Allocate vf sriov info */
+ p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
+ if (!p_sriov) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `struct ecore_sriov'\n");
+ return ECORE_NOMEM;
+ }
+
+ OSAL_MEMSET(p_sriov, 0, sizeof(*p_sriov));
+
+ /* Allocate vf2pf msg */
+ p_sriov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_sriov->
+ vf2pf_request_phys,
+ sizeof(union
+ vfpf_tlvs));
+ if (!p_sriov->vf2pf_request) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `vf2pf_request' DMA memory\n");
+ goto free_p_sriov;
+ }
+
+ p_sriov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_sriov->
+ pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
+ if (!p_sriov->pf2vf_reply) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate `pf2vf_reply' DMA memory\n");
+ goto free_vf2pf_request;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF's Request mailbox [%p virt 0x%" PRIx64 " phys], "
+ "Response mailbox [%p virt 0x%" PRIx64 " phys]\n",
+ p_sriov->vf2pf_request,
+ (u64)p_sriov->vf2pf_request_phys,
+ p_sriov->pf2vf_reply, (u64)p_sriov->pf2vf_reply_phys);
+
+ /* Allocate Bulletin board */
+ p_sriov->bulletin.size = sizeof(struct ecore_bulletin_content);
+ p_sriov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_sriov->bulletin.
+ phys,
+ p_sriov->bulletin.
+ size);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF's bulletin Board [%p virt 0x%" PRIx64 " phys 0x%08x bytes]\n",
+ p_sriov->bulletin.p_virt, (u64)p_sriov->bulletin.phys,
+ p_sriov->bulletin.size);
+
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_sriov->mutex);
+ OSAL_MUTEX_INIT(&p_sriov->mutex);
+
+ p_hwfn->vf_iov_info = p_sriov;
+
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+
+ /* First VF needs to query for information from PF */
+ if (!p_hwfn->my_id)
+ rc = ecore_vf_pf_acquire(p_hwfn);
+
+ return rc;
+
+free_vf2pf_request:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sriov->vf2pf_request,
+ p_sriov->vf2pf_request_phys,
+ sizeof(union vfpf_tlvs));
+free_p_sriov:
+ OSAL_FREE(p_hwfn->p_dev, p_sriov);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn)
+{
+ p_hwfn->b_int_enabled = 1;
+
+ return 0;
+}
+
+/* TEMP TEMP until in HSI */
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+#define USTORM_QZONE_START(dev) (MSTORM_QZONE_START(dev) + \
+ (MSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ u8 rx_qid,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_start_rxq_tlv *req;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+ u8 hw_qid;
+ u64 init_prod_val = 0;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+ /* @@@TBD MichalK TPA */
+
+ req->rx_qid = rx_qid;
+ req->cqe_pbl_addr = cqe_pbl_addr;
+ req->cqe_pbl_size = cqe_pbl_size;
+ req->rxq_addr = bd_chain_phys_addr;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+ req->hc_rate = 0; /* @@@TBD MichalK -> host coalescing! */
+ req->bd_max_bytes = bd_max_bytes;
+ req->stat_id = -1; /* No stats at the moment */
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ if (pp_prod) {
+ hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+
+ *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->p_dev) +
+ (hw_qid) * MSTORM_QZONE_SIZE +
+ OFFSETOF(struct mstorm_eth_queue_zone, rx_producers);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)(&init_prod_val));
+ }
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_rxqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+ /* @@@TBD MichalK TPA */
+
+ /* @@@TBD MichalK - relevant ???
+ * flags VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN
+ */
+ req->rx_qid = rx_qid;
+ req->num_rxqs = 1;
+ req->cqe_completion = cqe_completion;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_start_txq_tlv *req;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+ /* @@@TBD MichalK TPA */
+
+ req->tx_qid = tx_queue_id;
+
+ /* Tx */
+ req->pbl_addr = pbl_addr;
+ req->pbl_size = pbl_size;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+ req->hc_rate = 0; /* @@@TBD MichalK -> host coalescing! */
+ req->flags = 0; /* @@@TBD MichalK -> flags... */
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ if (pp_doorbell) {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_txqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+ /* @@@TBD MichalK TPA */
+
+ /* @@@TBD MichalK - relevant ??? flags
+ * VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN
+ */
+ req->tx_qid = tx_qid;
+ req->num_txqs = 1;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 comp_cqe_flg, u8 comp_event_flg)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ struct vfpf_update_rxq_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
+
+ req->rx_qid = rx_queue_id;
+ req->num_rxqs = num_rxqs;
+
+ if (comp_cqe_flg)
+ req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
+ if (comp_event_flg)
+ req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
+ u16 mtu, u8 inner_vlan_removal,
+ enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
+ u8 only_untagged)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_start_tlv *req;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+ req->mtu = mtu;
+ req->vport_id = vport_id;
+ req->inner_vlan_removal = inner_vlan_removal;
+ req->tpa_mode = tpa_mode;
+ req->max_buffers_per_cqe = max_buffers_per_cqe;
+ req->only_untagged = only_untagged;
+
+ /* status blocks */
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+ if (p_hwfn->sbs_info[i])
+ req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ return rc;
+}
+
+static void
+ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *p_resp;
+ u16 tlv;
+
+ if (p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg) {
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VP update activate tlv configured\n");
+ else
+ DP_NOTICE(p_hwfn, true,
+ "VP update activate tlv config failed\n");
+ }
+
+ if (p_data->update_tx_switching_flg) {
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VP update tx switch tlv configured\n");
+#ifndef ASIC_ONLY
+ else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ DP_NOTICE(p_hwfn, false,
+ "FPGA: Skip checking whether PF"
+ " replied to Tx-switching request\n");
+#endif
+ else
+ DP_NOTICE(p_hwfn, true,
+ "VP update tx switch tlv config failed\n");
+ }
+
+ if (p_data->update_inner_vlan_removal_flg) {
+ tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VP update vlan strip tlv configured\n");
+ else
+ DP_NOTICE(p_hwfn, true,
+ "VP update vlan strip tlv config failed\n");
+ }
+
+ if (p_data->update_approx_mcast_flg) {
+ tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VP update mcast tlv configured\n");
+ else
+ DP_NOTICE(p_hwfn, true,
+ "VP update mcast tlv config failed\n");
+ }
+
+ if (p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config) {
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VP update accept_mode tlv configured\n");
+ else
+ DP_NOTICE(p_hwfn, true,
+ "VP update accept_mode tlv config failed\n");
+ }
+
+ if (p_data->rss_params) {
+ tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VP update rss tlv configured\n");
+ else
+ DP_NOTICE(p_hwfn, true,
+ "VP update rss tlv config failed\n");
+ }
+
+ if (p_data->sge_tpa_params) {
+ tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ p_resp = (struct pfvf_def_resp_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VP update sge tpa tlv configured\n");
+ else
+ DP_NOTICE(p_hwfn, true,
+ "VP update sge tpa tlv config failed\n");
+ }
+}
+
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ struct vfpf_vport_update_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ u8 update_rx, update_tx;
+ u32 resp_size = 0;
+ u16 size, tlv;
+ int rc;
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ resp_size = sizeof(*resp);
+
+ update_rx = p_params->update_vport_active_rx_flg;
+ update_tx = p_params->update_vport_active_tx_flg;
+
+ /* clear mailbox and prep header tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+ /* Prepare extended tlvs */
+ if (update_rx || update_tx) {
+ size = sizeof(struct vfpf_vport_update_activate_tlv);
+ p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_act_tlv->update_rx = update_rx;
+ p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+ }
+
+ if (update_tx) {
+ p_act_tlv->update_tx = update_tx;
+ p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+ }
+ }
+
+ if (p_params->update_inner_vlan_removal_flg) {
+ size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
+ p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
+ }
+
+ if (p_params->update_tx_switching_flg) {
+ size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+ }
+
+ if (p_params->update_approx_mcast_flg) {
+ size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+ p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
+ sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ }
+
+ update_rx = p_params->accept_flags.update_rx_mode_config;
+ update_tx = p_params->accept_flags.update_tx_mode_config;
+
+ if (update_rx || update_tx) {
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+ p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_accept_tlv->update_rx_mode = update_rx;
+ p_accept_tlv->rx_accept_filter =
+ p_params->accept_flags.rx_accept_filter;
+ }
+
+ if (update_tx) {
+ p_accept_tlv->update_tx_mode = update_tx;
+ p_accept_tlv->tx_accept_filter =
+ p_params->accept_flags.tx_accept_filter;
+ }
+ }
+
+ if (p_params->rss_params) {
+ struct ecore_rss_params *rss_params = p_params->rss_params;
+
+ size = sizeof(struct vfpf_vport_update_rss_tlv);
+ p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (rss_params->update_rss_config)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CONFIG_FLAG;
+ if (rss_params->update_rss_capabilities)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CAPS_FLAG;
+ if (rss_params->update_rss_ind_table)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+ if (rss_params->update_rss_key)
+ p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+ p_rss_tlv->rss_enable = rss_params->rss_enable;
+ p_rss_tlv->rss_caps = rss_params->rss_caps;
+ p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+ OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
+ sizeof(rss_params->rss_ind_table));
+ OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ }
+
+ if (p_params->update_accept_any_vlan_flg) {
+ size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+ p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+ p_any_vlan_tlv->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
+ }
+
+ if (p_params->sge_tpa_params) {
+ struct ecore_sge_tpa_params *sge_tpa_params =
+ p_params->sge_tpa_params;
+
+ size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
+ p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (sge_tpa_params->update_tpa_en_flg)
+ p_sge_tpa_tlv->update_sge_tpa_flags |=
+ VFPF_UPDATE_TPA_EN_FLAG;
+ if (sge_tpa_params->update_tpa_param_flg)
+ p_sge_tpa_tlv->update_sge_tpa_flags |=
+ VFPF_UPDATE_TPA_PARAM_FLAG;
+
+ if (sge_tpa_params->tpa_ipv4_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
+ if (sge_tpa_params->tpa_ipv6_en_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
+ if (sge_tpa_params->tpa_pkt_split_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
+ if (sge_tpa_params->tpa_hdr_data_split_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_HDR_DATA_SPLIT_FLAG;
+ if (sge_tpa_params->tpa_gro_consistent_flg)
+ p_sge_tpa_tlv->sge_tpa_flags |=
+ VFPF_TPA_GRO_CONSIST_FLAG;
+
+ p_sge_tpa_tlv->tpa_max_aggs_num =
+ sge_tpa_params->tpa_max_aggs_num;
+ p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
+ p_sge_tpa_tlv->tpa_min_size_to_start =
+ sge_tpa_params->tpa_min_size_to_start;
+ p_sge_tpa_tlv->tpa_min_size_to_cont =
+ sge_tpa_params->tpa_min_size_to_cont;
+
+ p_sge_tpa_tlv->max_buffers_per_cqe =
+ sge_tpa_params->max_buffers_per_cqe;
+ }
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_first_tlv *req;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_AGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_first_tlv *req;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ u32 size;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = ECORE_AGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ /* TODO - might need to revise this for 100g */
+ if (IS_LEAD_HWFN(p_hwfn))
+ OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+
+ if (p_iov->vf2pf_request)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys,
+ sizeof(union vfpf_tlvs));
+ if (p_iov->pf2vf_reply)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->pf2vf_reply,
+ p_iov->pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct ecore_bulletin_content);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->bulletin.p_virt,
+ p_iov->bulletin.phys, size);
+ }
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = OSAL_NULL;
+
+ return rc;
+}
+
+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_mcast *p_filter_cmd)
+{
+ struct ecore_sp_vport_update_params sp_params;
+ int i;
+
+ OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
+ sp_params.update_approx_mcast_flg = 1;
+
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ OSAL_SET_BIT(bit, sp_params.bins);
+ }
+ }
+
+ ecore_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_ucast
+ *p_ucast)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_ucast_filter_tlv *req;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* Sanitize */
+ if (p_ucast->opcode == ECORE_FILTER_MOVE) {
+ DP_NOTICE(p_hwfn, true,
+ "VFs don't support Moving of filters\n");
+ return ECORE_INVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+ req->opcode = (u8)p_ucast->opcode;
+ req->type = (u8)p_ucast->type;
+ OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
+ req->vlan = p_ucast->vlan;
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_AGAIN;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return ECORE_INVAL;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
+ u8 *p_change)
+{
+ struct ecore_bulletin_content shadow;
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ u32 crc, crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+
+ *p_change = 0;
+
+ /* Need to guarantee PF is not in the middle of writing it */
+ OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+ /* If version did not update, no need to do anything */
+ if (shadow.version == p_iov->bulletin_shadow.version)
+ return ECORE_SUCCESS;
+
+ /* Verify the bulletin we see is valid */
+ crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
+ if (crc != shadow.crc)
+ return ECORE_AGAIN;
+
+ /* Set the shadow bulletin and process it */
+ OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Read a bulletin update %08x\n", shadow.version);
+
+ *p_change = 1;
+
+ return ECORE_SUCCESS;
+}
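A caller is expected to poll this periodically; a minimal sketch of consuming an update (the polling wrapper is illustrative):

/* Illustrative: poll the bulletin and, on a consistent update,
 * read derived state out of the refreshed shadow copy.
 */
static void example_poll_bulletin(struct ecore_hwfn *p_hwfn)
{
	struct ecore_mcp_link_state link;
	u8 change = 0;

	/* ECORE_AGAIN means the CRC failed (PF mid-write); retry later */
	if (ecore_vf_read_bulletin(p_hwfn, &change) != ECORE_SUCCESS)
		return;

	if (change)
		ecore_vf_get_link_state(p_hwfn, &link);
}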
+
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *p_params,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_params, 0, sizeof(*p_params));
+
+ p_params->speed.autoneg = p_bulletin->req_autoneg;
+ p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+ p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+ p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+ p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+ p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+ p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *params)
+{
+ __ecore_vf_get_link_params(p_hwfn, params,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *p_link,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+
+ p_link->link_up = p_bulletin->link_up;
+ p_link->speed = p_bulletin->speed;
+ p_link->full_duplex = p_bulletin->full_duplex;
+ p_link->an = p_bulletin->autoneg;
+ p_link->an_complete = p_bulletin->autoneg_complete;
+ p_link->parallel_detection = p_bulletin->parallel_detection;
+ p_link->pfc_enabled = p_bulletin->pfc_enabled;
+ p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+ p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+ p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+ p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+ p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *link)
+{
+ __ecore_vf_get_link_state(p_hwfn, link,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps,
+ struct ecore_bulletin_content *p_bulletin)
+{
+ OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
+ p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps)
+{
+ __ecore_vf_get_link_caps(p_hwfn, p_link_caps,
+ &p_hwfn->vf_iov_info->bulletin_shadow);
+}
+
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
+{
+ *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
+{
+ OSAL_MEMCPY(port_mac,
+ p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
+ ETH_ALEN);
+}
+
+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+ struct ecore_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+/* @DPDK */
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac)
+{
+ struct ecore_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_mac = p_vf->acquire_resp.resc.num_mac_filters;
+}
+
+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+ if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return true;
+
+ /* Forbid VF from changing a MAC enforced by PF */
+ if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
+ return false;
+
+ /* The requested MAC matches the PF-enforced one */
+ return true;
+}
+
+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+ u8 *p_is_forced)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ if (p_is_forced)
+ *p_is_forced = 1;
+ } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+ if (p_is_forced)
+ *p_is_forced = 0;
+ } else {
+ return false;
+ }
+
+ OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);
+
+ return true;
+}
+
+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
+{
+ struct ecore_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return false;
+
+ if (dst_pvid)
+ *dst_pvid = bulletin->pvid;
+
+ return true;
+}
+
+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
+ u16 *fw_eng)
+{
+ struct pf_vf_pfdev_info *info;
+
+ info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+ *fw_major = info->fw_major;
+ *fw_minor = info->fw_minor;
+ *fw_rev = info->fw_rev;
+ *fw_eng = info->fw_eng;
+}
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
new file mode 100644
index 00000000..334b588c
--- /dev/null
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_H__
+#define __ECORE_VF_H__
+
+#include "ecore_status.h"
+#include "ecore_vf_api.h"
+#include "ecore_l2_api.h"
+#include "ecore_vfpf_if.h"
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ *
+ * @brief hw preparation for VF
+ * sends ACQUIRE message
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev);
+
+/**
+ *
+ * @brief VF init in hw (equivalent to hw_init in PF)
+ * mark interrupts as enabled
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ *
+ * @brief VF - start the RX Queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_queue_id - zero based within the VF
+ * @param sb - VF status block for this queue
+ * @param sb_index - Index within the status block
+ * @param bd_max_bytes - maximum number of bytes per bd
+ * @param bd_chain_phys_addr - physical address of bd chain
+ * @param cqe_pbl_addr - physical address of pbl
+ * @param cqe_pbl_size - pbl size
+ * @param pp_prod - pointer to the producer to be
+ * used in fastpath
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod);
+
+/**
+ *
+ * @brief VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @param p_hwfn
+ * @param tx_queue_id - zero based within the VF
+ * @param sb - status block for this queue
+ * @param sb_index - index within the status block
+ * @param pbl_addr - physical address of the pbl
+ * @param pbl_size - pbl size
+ * @param pp_doorbell - pointer to the address to
+ * write the doorbell to
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell);
+
+/**
+ *
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_qid
+ * @param cqe_completion
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion);
+
+/**
+ *
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param tx_qid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ u16 tx_qid);
+
+/**
+ * @brief VF - update the RX queue by sending a message to the
+ * PF
+ *
+ * @param p_hwfn
+ * @param rx_queue_id
+ * @param num_rxqs
+ * @param comp_cqe_flg
+ * @param comp_event_flg
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
+
+/**
+ *
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params);
+
+/**
+ *
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
+
+/**
+ *
+ * @brief VF - free the VF's memories
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status
+ */
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
+
+/**
+ *
+ * @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * sb_id. For VFs, IGU SBs don't have to be contiguous
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief ecore_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum ecore_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged);
+
+/**
+ * @brief ecore_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
+					      struct ecore_filter_ucast *p_param);
+
+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief ecore_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *p_params,
+ struct ecore_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *p_link,
+ struct ecore_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps,
+ struct ecore_bulletin_content *p_bulletin);
+
+#else
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_hw_prepare(struct ecore_dev *p_dev)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_init(struct ecore_hwfn *p_hwfn)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, u8 rx_queue_id, u16 sb,
+		      u8 sb_index, u16 bd_max_bytes,
+		      dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr,
+		      u16 cqe_pbl_size, void OSAL_IOMEM **pp_prod)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn, u16 tx_queue_id, u16 sb,
+		      u8 sb_index, dma_addr_t pbl_addr, u16 pbl_size,
+		      void OSAL_IOMEM **pp_doorbell)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, u16 rx_qid,
+		     bool cqe_completion)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, u16 rx_queue_id,
+			u8 num_rxqs, u8 comp_cqe_flg, u8 comp_event_flg)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_params)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 sb_id)
+{
+ return 0;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(
+ struct ecore_hwfn *p_hwfn, u8 vport_id, u16 mtu,
+ u8 inner_vlan_removal, enum ecore_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(
+ struct ecore_hwfn *p_hwfn)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(
+ struct ecore_hwfn *p_hwfn, struct ecore_filter_ucast *p_param)
+{
+ return ECORE_INVAL;
+}
+
+static OSAL_INLINE void
+ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+			 struct ecore_filter_mcast *p_filter_cmd)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE void
+__ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+			   struct ecore_mcp_link_params *p_params,
+			   struct ecore_bulletin_content *p_bulletin)
+{
+}
+
+static OSAL_INLINE void
+__ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+			  struct ecore_mcp_link_state *p_link,
+			  struct ecore_bulletin_content *p_bulletin)
+{
+}
+
+static OSAL_INLINE void
+__ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+			 struct ecore_mcp_link_capabilities *p_link_caps,
+			 struct ecore_bulletin_content *p_bulletin)
+{
+}
+#endif
+
+#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
new file mode 100644
index 00000000..f28b6860
--- /dev/null
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_API_H__
+#define __ECORE_VF_API_H__
+
+#include "ecore_sp_api.h"
+#include "ecore_mcp_api.h"
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - ecore fills 1 iff bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
+ u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from ecore
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from ecore
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from ecore
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated RX queues
+ */
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_vlan_filters - allocated VLAN filters
+ */
+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+/**
+ * @brief Get number of MAC filters allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_mac_filters - allocated MAC filters
+ */
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * @brief Copy the forced MAC address from the bulletin board
+ *
+ * @param hwfn
+ * @param dst_mac
+ * @param p_is_forced - out param; in case a MAC exists,
+ *	indicates whether or not it is forced
+ *
+ * @return bool - true if a MAC exists, false otherwise
+ */
+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+ u8 *p_is_forced);
+
+/**
+ * @brief Check if a forced VLAN is set and copy it
+ *        from the bulletin board
+ *
+ * @param hwfn
+ * @param dst_pvid
+ * @return bool
+ */
+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
+
+/**
+ * @brief Set firmware version information in dev_info from the VF's
+ *        ACQUIRE response TLV
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+ u16 *fw_major,
+ u16 *fw_minor, u16 *fw_rev, u16 *fw_eng);
+#else
+static OSAL_INLINE enum _ecore_status_t
+ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, u8 *p_change)
+{
+	return ECORE_INVAL;
+}
+
+static OSAL_INLINE void
+ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+			 struct ecore_mcp_link_params *params)
+{
+}
+
+static OSAL_INLINE void
+ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+			struct ecore_mcp_link_state *link)
+{
+}
+
+static OSAL_INLINE void
+ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+		       struct ecore_mcp_link_capabilities *p_link_caps)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_rxqs)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
+ u8 *port_mac)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+						     u32 *num_mac_filters)
+{
+}
+
+static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
+{
+ return false;
+}
+
+static OSAL_INLINE bool
+ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+				 u8 *p_is_forced)
+{
+	return false;
+}
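+
+/* Note: the CONFIG_ECORE_SRIOV-disabled branch declares no stub for
+ * ecore_vf_bulletin_get_forced_vlan(); a minimal sketch matching the
+ * pattern of its neighbours would be:
+ */
+static OSAL_INLINE bool
+ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
+{
+	return false;
+}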
+
+static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+}
+#endif
+#endif
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
new file mode 100644
index 00000000..2fa4d155
--- /dev/null
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -0,0 +1,590 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_PF_IF_H__
+#define __ECORE_VF_PF_IF_H__
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+#ifndef aligned_u64
+#define aligned_u64 u64
+#endif
+
+/***********************************************
+ *
+ * Common definitions for all HVs
+ *
+ **/
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+ u16 padding;
+};
+
+struct hw_sb_info {
+ u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
+ u8 padding[5];
+};
+
+/***********************************************
+ *
+ * HW VF-PF channel definitions
+ *
+ * A.K.A VF-PF mailbox
+ *
+ **/
+#define TLV_BUFFER_SIZE 1024
+#define TLV_ALIGN sizeof(u64)
+#define PF_VF_BULLETIN_SIZE 512
+
+#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+/* TODO: #define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020 */
+
+#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_ATTEMPTS 5 /* CRC failures before giving up */
+#define BULLETIN_CRC_SEED 0
+
+enum {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 padding;
+ aligned_u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
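+
+/* Illustrative sketch of how a request chain is laid out with the headers
+ * above: each TLV begins with a channel_tlv, and the chain is terminated
+ * by a channel_list_end_tlv of type CHANNEL_TLV_LIST_END (defined at the
+ * bottom of this file). The helper name is hypothetical; offsets stay
+ * aligned to TLV_ALIGN.
+ */
+static OSAL_INLINE void *example_add_tlv(u8 *buf, u16 *offset,
+					 u16 type, u16 length)
+{
+	struct channel_tlv *tl = (struct channel_tlv *)(buf + *offset);
+
+	tl->type = type;
+	tl->length = length;
+	/* Keep the next TLV 8-byte aligned, as TLV_ALIGN requires */
+	*offset += (length + TLV_ALIGN - 1) & ~(TLV_ALIGN - 1);
+	return tl;
+}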
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+#define VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER (1 << 0)
+ aligned_u64 capabilties;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_revision;
+ u8 fw_engineering;
+ u32 driver_version;
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 padding[5];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ aligned_u64 bulletin_addr;
+ u32 bulletin_size;
+ u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+ struct channel_tlv tl;
+
+ u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
+#define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
+
+ u8 rss_enable;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
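+
+/* For example, rss_table_size_log == 7 describes a 2^7 == 128-entry
+ * indirection table, i.e. the full T_ETH_INDIRECTION_TABLE_SIZE.
+ */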
+
+struct pfvf_storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct pfvf_stats_info {
+ struct pfvf_storm_stats mstats;
+ struct pfvf_storm_stats pstats;
+ struct pfvf_storm_stats tstats;
+ struct pfvf_storm_stats ustats;
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 mfw_ver;
+
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ aligned_u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED (1 << 0)
+
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 os_type;
+
+		/* These should match the PF's ecore_dev values */
+ u16 chip_rev;
+ u8 dev_type;
+
+ u8 padding;
+
+ struct pfvf_stats_info stats_info;
+
+ u8 port_mac[ETH_ALEN];
+ u8 padding2[2];
+ } pfdev_info;
+
+ struct pf_vf_resc {
+ /* in case of status NO_RESOURCE in message hdr, pf will fill
+ * this struct with suggested amount of resources for next
+ * acquire request
+ */
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 padding[2];
+ } resc;
+
+ u32 bulletin_size;
+ u32 padding;
+};
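+
+/* Illustrative sketch (the helper name is hypothetical) of the
+ * renegotiation implied by the comment in pf_vf_resc above: when the PF
+ * answers PFVF_STATUS_NO_RESOURCE, the VF retries ACQUIRE with the
+ * amounts the PF suggested in 'resc'.
+ */
+static OSAL_INLINE void
+example_shrink_resc_request(struct vf_pf_resc_request *req,
+			    const struct pf_vf_resc *resc)
+{
+	req->num_rxqs = resc->num_rxqs;
+	req->num_txqs = resc->num_txqs;
+	req->num_sbs = resc->num_sbs;
+	req->num_mac_filters = resc->num_mac_filters;
+	req->num_vlan_filters = resc->num_vlan_filters;
+	req->num_mc_filters = resc->num_mc_filters;
+}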
+
+/* Init VF */
+struct vfpf_init_tlv {
+ struct vfpf_first_tlv first_tlv;
+ aligned_u64 stats_addr;
+
+ u16 rx_mask;
+ u16 tx_mask;
+ u8 drop_ttl0_flg;
+ u8 padding[3];
+
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ aligned_u64 rxq_addr;
+ aligned_u64 deprecated_sge_addr;
+ aligned_u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
+
+};
+
+struct vfpf_start_txq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ aligned_u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 cqe_completion;
+ u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 tx_qid;
+ u8 num_txqs;
+ u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ aligned_u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
+
+ u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+
+ u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 inner_vlan_removal;
+
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
+
+ u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+
+ aligned_u64 bins[8];
+};
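+
+/* Illustrative sketch: bins[] is a bitmap of approximate-multicast bins
+ * (see ETH_MULTICAST_MAC_BINS in eth_common.h). Setting a bin, given its
+ * index, is plain bit arithmetic; how a bin index is derived from a MAC
+ * address is firmware-defined and not shown here. The helper name is
+ * hypothetical.
+ */
+static OSAL_INLINE void example_set_mcast_bin(aligned_u64 *bins, u16 bin)
+{
+	bins[bin / 64] |= 1ULL << (bin % 64);
+}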
+
+struct vfpf_vport_update_accept_param_tlv {
+ struct channel_tlv tl;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+ struct channel_tlv tl;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+
+ u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+ struct channel_tlv tl;
+
+ u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
+#define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
+#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
+#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
+
+ u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
+#define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
+
+ u8 max_buffers_per_cqe;
+
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
+
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 opcode;
+ u8 type;
+
+ u8 mac[ETH_ALEN];
+
+ u16 vlan;
+ u16 padding[3];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_init_tlv init;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
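+
+/* The tlv_buf_size member pins the union to TLV_BUFFER_SIZE, so request
+ * and reply buffers can be allocated at one fixed size. An illustrative
+ * compile-time check (the typedef name is hypothetical):
+ */
+typedef char example_vfpf_tlvs_size_check
+	[(sizeof(union vfpf_tlvs) == TLV_BUFFER_SIZE) ? 1 : -1];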
+
+union pfvf_tlvs {
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+/* This is a structure which is allocated in the VF, which the PF may update
+ * when it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF (to prevent
+ * loss of data upon multiple updates (or the need for read modify write)).
+ */
+enum ecore_bulletin_bit {
+ /* Alert the VF that a forced MAC was set by the PF */
+ MAC_ADDR_FORCED = 0,
+
+ /* The VF should not access the vfpf channel */
+ VFPF_CHANNEL_INVALID = 1,
+
+ /* Alert the VF that a forced VLAN was set by the PF */
+ VLAN_ADDR_FORCED = 2,
+
+ /* Indicate that `default_only_untagged' contains actual data */
+ VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+ VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+	/* Alert the VF that a suggested MAC was sent by the PF.
+	 * MAC_ADDR is disabled in case MAC_ADDR_FORCED is set
+ */
+ VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct ecore_bulletin_content {
+	u32 crc; /* CRC of the structure, to ensure it is not read
+		  * mid-update
+ */
+ u32 version;
+
+	aligned_u64 valid_bitmap; /* bitmap indicating which fields
+ * hold valid values
+ */
+
+ u8 mac[ETH_ALEN]; /* used for MAC_ADDR or MAC_ADDR_FORCED */
+
+ u8 default_only_untagged; /* If valid, 1 => only untagged Rx
+ * if no vlan filter is configured.
+ */
+ u8 padding;
+
+ /* The following is a 'copy' of ecore_mcp_link_state,
+ * ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's
+ * possible the structs will increase further along the road we cannot
+ * have it here; Instead we need to have all of its fields.
+ */
+ u8 req_autoneg;
+ u8 req_autoneg_pause;
+ u8 req_forced_rx;
+ u8 req_forced_tx;
+ u8 padding2[4];
+
+ u32 req_adv_speed;
+ u32 req_forced_speed;
+ u32 req_loopback;
+ u32 padding3;
+
+ u8 link_up;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 autoneg_complete;
+ u8 parallel_detection;
+ u8 pfc_enabled;
+ u8 partner_tx_flow_ctrl_en;
+ u8 partner_rx_flow_ctrl_en;
+ u8 partner_adv_pause;
+ u8 sfp_tx_fault;
+ u8 padding4[6];
+
+ u32 speed;
+ u32 partner_adv_speed;
+
+ u32 capability_speed;
+
+ /* Forced vlan */
+ u16 pvid;
+ u16 padding5;
+};
+
+struct ecore_bulletin {
+ dma_addr_t phys;
+ struct ecore_bulletin_content *p_virt;
+ u32 size;
+};
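+
+/* Illustrative sketch of the sampling scheme described above: copy the
+ * bulletin out, then verify the CRC computed over everything after the
+ * crc field itself, retrying up to BULLETIN_ATTEMPTS times in case the PF
+ * was mid-update. The crc32 helper is passed in because its real name
+ * here would be an assumption; the function name is hypothetical.
+ */
+static OSAL_INLINE bool
+example_sample_bulletin(const struct ecore_bulletin_content *shadow,
+			struct ecore_bulletin_content *copy,
+			u32 (*crc32)(u32 seed, u8 *ptr, u32 length))
+{
+	int i;
+
+	for (i = 0; i < BULLETIN_ATTEMPTS; i++) {
+		*copy = *shadow;
+		if (copy->crc == crc32(BULLETIN_CRC_SEED,
+				       (u8 *)copy + sizeof(copy->crc),
+				       sizeof(*copy) - sizeof(copy->crc)))
+			return true; /* consistent snapshot */
+	}
+	return false; /* PF kept updating; give up */
+}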
+
+#ifndef print_enum
+enum {
+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
+
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_START_RXQ,
+ CHANNEL_TLV_START_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_MAX
+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
+};
+extern const char *ecore_channel_tlvs_string[];
+
+#else
+print_enum(channel_tlvs, CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_SETUP_RXQ,
+ CHANNEL_TLV_SETUP_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_MAX);
+#endif
+
+#endif /* __ECORE_VF_PF_IF_H__ */
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
new file mode 100644
index 00000000..046bbb20
--- /dev/null
+++ b/drivers/net/qede/base/eth_common.h
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_CACHE_LINE_SIZE 64
+#define ETH_RX_CQE_GAP 32
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES 510
+#define ETH_TX_LSO_WINDOW_BDS_NUM 18
+#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFFFF
+
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+
+#define ETH_RX_MAX_BUFF_PER_PKT 5
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
+
+/* ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT 10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_KEY_SIZE_REGS 10
+#define ETH_RSS_ENGINE_NUM_K2 207
+#define ETH_RSS_ENGINE_NUM_BB 127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM 64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+
+/*
+ * Interrupt coalescing TimeSet
+ */
+struct coalescing_timeset {
+ u8 timeset;
+ u8 valid /* Only if this flag is set, timeset will take effect */;
+};
+
+/*
+ * Destination port mode
+ */
+enum dest_port_mode {
+ DEST_PORT_PHY /* Send to physical port. */,
+ DEST_PORT_LOOPBACK /* Send to loopback port. */,
+ DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
+ DEST_PORT_DROP /* Drop the packet in PBF. */,
+ MAX_DEST_PORT_MODE
+};
+
+/*
+ * Ethernet address type
+ */
+enum eth_addr_type {
+ BROADCAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ UNICAST_ADDRESS,
+ UNKNOWN_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
+struct eth_tx_1st_bd_flags {
+ u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
+};
+
+/*
+ * The parsing information data for the first tx bd of a given packet.
+ */
+struct eth_tx_data_1st_bd {
+ __le16 vlan /* VLAN to insert to packet (if needed). */;
+ /* Number of BDs in packet. Should be at least 2 in non-LSO
+ * packet and at least 3 in LSO (or Tunnel with IPv6+ext) packet.
+ */
+ u8 nbds;
+ struct eth_tx_1st_bd_flags bd_flags;
+ __le16 bitfields;
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2
+};
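+
+/* The MASK/SHIFT pairs above (and throughout these headers) follow one
+ * convention: the mask is the field width after shifting. The real driver
+ * has equivalent accessors; the macro names below are illustrative only,
+ * and the setter assumes the field is currently zero.
+ */
+#define EXAMPLE_GET_FIELD(value, name) \
+	(((value) >> name##_SHIFT) & name##_MASK)
+#define EXAMPLE_SET_FIELD(value, name, val) \
+	((value) |= ((val) & name##_MASK) << name##_SHIFT)
+/* e.g. EXAMPLE_SET_FIELD(flags.bitfields, ETH_TX_1ST_BD_FLAGS_LSO, 1) */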
+
+/*
+ * The parsing information data for the second tx bd of a given packet.
+ */
+struct eth_tx_data_2nd_bd {
+ __le16 tunn_ip_size;
+ __le16 bitfields1;
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
+ __le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
+};
+
+/*
+ * Firmware data for L2-EDPM packet.
+ */
+struct eth_edpm_fw_data {
+	/* Parsing information data from the 1st BD. */
+	struct eth_tx_data_1st_bd data_1st_bd;
+	/* Parsing information data from the 2nd BD. */
+	struct eth_tx_data_2nd_bd data_2nd_bd;
+ __le32 reserved;
+};
+
+/*
+ * FW debug.
+ */
+struct eth_fast_path_cqe_fw_debug {
+ u8 reserved0 /* FW reserved. */;
+ u8 reserved1 /* FW reserved. */;
+ __le16 reserved2 /* FW reserved. */;
+};
+
+struct tunnel_parsing_flags {
+ u8 flags;
+#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
+#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+};
+
+/*
+ * Regular ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_reg_cqe {
+ u8 type /* CQE type */;
+ u8 bitfields;
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
+ __le16 pkt_len /* Total packet length (from the parser) */;
+	/* Parsing and error flags from the parser */
+	struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag /* 802.1q VLAN tag */;
+ __le32 rss_hash /* RSS hash result */;
+ __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+ u8 placement_offset /* Offset of placement from BD start */;
+	struct tunnel_parsing_flags tunnel_pars_flags; /* Tunnel Parsing Flags */
+ u8 bd_num /* Number of BDs, used for packet */;
+ u8 reserved[7];
+ struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+ u8 reserved1[3];
+ u8 flags;
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
+};
+
+/*
+ * TPA-continue ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_cont_cqe {
+ u8 type /* CQE type */;
+ u8 tpa_agg_index /* TPA aggregation index */;
+	/* List of the segment sizes */
+	__le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved[5];
+ u8 reserved1 /* FW reserved. */;
+ __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */;
+};
+
+/*
+ * TPA-end ETH Rx FP CQE .
+ */
+struct eth_fast_path_rx_tpa_end_cqe {
+ u8 type /* CQE type */;
+ u8 tpa_agg_index /* TPA aggregation index */;
+ __le16 total_packet_len /* Total aggregated packet length */;
+ u8 num_of_bds /* Total number of BDs comprising the packet */;
+	u8 end_reason; /* Aggregation end reason. Use enum eth_tpa_end_reason */
+ __le16 num_of_coalesced_segs /* Number of coalesced TCP segments */;
+ __le32 ts_delta /* TCP timestamp delta */;
+	/* List of the segment sizes */
+	__le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
+ u8 reserved1[3];
+ u8 reserved2 /* FW reserved. */;
+ __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */;
+};
+
+/*
+ * TPA-start ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_start_cqe {
+ u8 type /* CQE type */;
+ u8 bitfields;
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
+ __le16 seg_len /* Segment length (packetLen from the parser) */;
+	/* Parsing and error flags from the parser */
+	struct parsing_and_err_flags pars_flags;
+ __le16 vlan_tag /* 802.1q VLAN tag */;
+ __le32 rss_hash /* RSS hash result */;
+ __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+ u8 placement_offset /* Offset of placement from BD start */;
+	struct tunnel_parsing_flags tunnel_pars_flags; /* Tunnel Parsing Flags */
+ u8 tpa_agg_index /* TPA aggregation index */;
+ u8 header_len /* Packet L2+L3+L4 header length */;
+	/* Additional BDs length list. */
+	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
+ struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+};
+
+/*
+ * The L4 pseudo checksum mode for Ethernet
+ */
+enum eth_l4_pseudo_checksum_mode {
+	/* Pseudo-header checksum on the packet is calculated with the
+	 * correct packet length field.
+	 */
+	ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+	/* Pseudo-header checksum on the packet is calculated with a zero
+	 * length field.
+	 */
+	ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
+	MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct eth_rx_bd {
+ struct regpair addr /* single continues buffer */;
+};
+
+/*
+ * regular ETH Rx SP CQE
+ */
+struct eth_slow_path_rx_cqe {
+ u8 type /* CQE type */;
+ u8 ramrod_cmd_id;
+ u8 error_flag;
+ u8 reserved[25];
+ __le16 echo;
+ u8 reserved1;
+ u8 flags;
+#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
+};
+
+/*
+ * union for all ETH Rx CQE types
+ */
+union eth_rx_cqe {
+ struct eth_fast_path_rx_reg_cqe fast_path_regular /* Regular FP CQE */;
+	/* TPA-start CQE */
+	struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+	/* TPA-continue CQE */
+	struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+	/* TPA-end CQE */
+	struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+ struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
+};
+
+/*
+ * ETH Rx CQE type
+ */
+enum eth_rx_cqe_type {
+ ETH_RX_CQE_TYPE_UNUSED,
+ ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */,
+ ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */,
+ MAX_ETH_RX_CQE_TYPE
+};
+
+/*
+ * Wrapper for the PMD Rx CQE, used to cover a full cache line when writing a CQE
+ */
+struct eth_rx_pmd_cqe {
+ union eth_rx_cqe cqe /* CQE data itself */;
+ u8 reserved[ETH_RX_CQE_GAP];
+};
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_prod_data {
+ __le16 bd_prod /* BD producer */;
+ __le16 cqe_prod /* CQE producer */;
+ __le16 reserved;
+ __le16 reserved1 /* FW reserved. */;
+};
+
+/*
+ * Aggregation end reason.
+ */
+enum eth_tpa_end_reason {
+ ETH_AGG_END_UNUSED,
+ ETH_AGG_END_SP_UPDATE /* SP configuration update */,
+ ETH_AGG_END_MAX_LEN
+ /* Maximum aggregation length or maximum buffer number used. */,
+ ETH_AGG_END_LAST_SEG
+ /* TCP PSH flag or TCP payload length below continue threshold. */,
+ ETH_AGG_END_TIMEOUT /* Timeout expiration. */,
+ ETH_AGG_END_NOT_CONSISTENT,
+ ETH_AGG_END_OUT_OF_ORDER,
+ ETH_AGG_END_NON_TPA_SEG,
+ MAX_ETH_TPA_END_REASON
+};
+
+/*
+ * Eth Tunnel Type
+ */
+enum eth_tunn_type {
+ ETH_TUNN_GENEVE /* GENEVE Tunnel. */,
+ ETH_TUNN_TTAG /* T-Tag Tunnel. */,
+ ETH_TUNN_GRE /* GRE Tunnel. */,
+ ETH_TUNN_VXLAN /* VXLAN Tunnel. */,
+ MAX_ETH_TUNN_TYPE
+};
+
+/*
+ * The first tx bd of a given packet
+ */
+struct eth_tx_1st_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_1st_bd data /* Parsing information data. */;
+};
+
+/*
+ * The second tx bd of a given packet
+ */
+struct eth_tx_2nd_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_2nd_bd data /* Parsing information data. */;
+};
+
+/*
+ * The parsing information data for the third tx bd of a given packet.
+ */
+struct eth_tx_data_3rd_bd {
+ __le16 lso_mss /* For LSO packet - the MSS in bytes. */;
+ __le16 bitfields;
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+ u8 tunn_l4_hdr_start_offset_w;
+ u8 tunn_hdr_size_w;
+};
+
+/*
+ * The third tx bd of a given packet
+ */
+struct eth_tx_3rd_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_3rd_bd data /* Parsing information data. */;
+};
+
+/*
+ * Complementary information for the regular tx bd of a given packet.
+ */
+struct eth_tx_data_bd {
+ __le16 reserved0;
+ __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+ __le16 reserved3;
+};
+
+/*
+ * The common regular TX BD ring element
+ */
+struct eth_tx_bd {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ struct eth_tx_data_bd data /* Complementary information. */;
+};
+
+union eth_tx_bd_types {
+ struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
+	/* The second tx bd of a given packet */
+	struct eth_tx_2nd_bd second_bd;
+ struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
+ struct eth_tx_bd reg_bd /* The common non-special bd */;
+};
+
+/*
+ * Mstorm Queue Zone
+ */
+struct mstorm_eth_queue_zone {
+ struct eth_rx_prod_data rx_producers;
+ __le32 reserved[2];
+};
+
+/*
+ * Ustorm Queue Zone
+ */
+struct ustorm_eth_queue_zone {
+	/* Rx interrupt coalescing TimeSet */
+	struct coalescing_timeset int_coalescing_timeset;
+ __le16 reserved[3];
+};
+
+/*
+ * Ystorm Queue Zone
+ */
+struct ystorm_eth_queue_zone {
+	/* Tx interrupt coalescing TimeSet */
+	struct coalescing_timeset int_coalescing_timeset;
+ __le16 reserved[3];
+};
+
+/*
+ * ETH doorbell data
+ */
+struct eth_db_data {
+ u8 params;
+#define ETH_DB_DATA_DEST_MASK 0x3
+#define ETH_DB_DATA_DEST_SHIFT 0
+#define ETH_DB_DATA_AGG_CMD_MASK 0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT 2
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
+#define ETH_DB_DATA_RESERVED_MASK 0x1
+#define ETH_DB_DATA_RESERVED_SHIFT 5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
new file mode 100644
index 00000000..71922654
--- /dev/null
+++ b/drivers/net/qede/base/mcp_public.h
@@ -0,0 +1,1205 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/****************************************************************************
+ *
+ * Name: mcp_public.h
+ *
+ * Description: MCP public data
+ *
+ * Created: 13/01/2013 yanivr
+ *
+ ****************************************************************************/
+
+#ifndef MCP_PUBLIC_H
+#define MCP_PUBLIC_H
+
+#define VF_MAX_STATIC 192 /* In case of AH */
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2 /* Global */
+#define MCP_GLOB_PORT_MAX 4 /* Global */
+#define MCP_GLOB_FUNC_MAX 16 /* Global */
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
+#define SECTION_OFFSET(_offsize) \
+((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* SECTION_SIZE is calculating the size in bytes out of offsize */
+#define SECTION_SIZE(_offsize) \
+(((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
+
+#define SECTION_ADDR(_offsize, idx) \
+(MCP_REG_SCRATCH + SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+(_pub_base + offsetof(struct mcp_public_data, sections[_section]))
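+
+/* Worked example of the offsize encoding: for _offsize == 0x00200040, the
+ * offset field is 0x0040 DWORDS, so SECTION_OFFSET() yields 0x0040 << 2 ==
+ * 0x100 bytes past MCP_REG_SCRATCH; the size field is 0x0020 DWORDS, so
+ * SECTION_SIZE() yields 0x0020 << 2 == 0x80 bytes per element, and
+ * SECTION_ADDR(_offsize, 1) lands 0x80 bytes past element 0.
+ */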
+
+/* PHY configuration */
+struct pmm_phy_cfg {
+ u32 speed; /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
+#define PMM_SPEED_AUTONEG 0
+#define PMM_SPEED_SMARTLINQ 0x8
+
+ u32 pause; /* bitmask */
+#define PMM_PAUSE_NONE 0x0
+#define PMM_PAUSE_AUTONEG 0x1
+#define PMM_PAUSE_RX 0x2
+#define PMM_PAUSE_TX 0x4
+
+ u32 adv_speed; /* Default should be the speed_cap_mask */
+ u32 loopback_mode;
+#define PMM_LOOPBACK_NONE 0
+#define PMM_LOOPBACK_INT_PHY 1
+#define PMM_LOOPBACK_EXT_PHY 2
+#define PMM_LOOPBACK_EXT 3
+#define PMM_LOOPBACK_MAC 4
+#define PMM_LOOPBACK_CNIG_AH_ONLY_0123 5 /* Port to itself */
+#define PMM_LOOPBACK_CNIG_AH_ONLY_2301 6 /* Port to Port */
+
+ /* features */
+ u32 feature_config_flags;
+
+};
+
+struct port_mf_cfg {
+ u32 dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ */
+struct pmm_stats {
+ u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter */
+ u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter */
+ u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter */
+ u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter */
+ u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter */
+ u64 r1518; /* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
+ u64 r1522; /* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged */
+ u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter */
+ u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter */
+ u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter */
+ u64 r16383; /* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame ctr */
+ u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter */
+ u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter */
+ u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter */
+ u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter */
+ u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter */
+ u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+ u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter */
+ u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+ u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+ u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+ u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+ u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */
+ u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter */
+ u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter */
+ u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter */
+ u64 t1518; /* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
+ u64 t2047; /* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
+ u64 t4095; /* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
+ u64 t9216; /* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
+ u64 t16383; /* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame ctr */
+ u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+ u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+ u64 tlpiec; /* 0x6C (Offset 0x108) Transmit Logical Type LLFC */
+ u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+ u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+ u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+ u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+ u64 rxpok; /* 0x22 (Offset 0x138) RX good frame */
+ u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+ u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+ u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+ u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+ u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+};
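+
+/* Because the layout must stay in sync with pmm_stats_map, a compile-time
+ * check of the commented offsets is cheap insurance. Illustrative only
+ * (assumes offsetof is available; the typedef name is hypothetical):
+ */
+typedef char example_pmm_stats_layout_check
+	[(offsetof(struct pmm_stats, txcf) == 0x160) ? 1 : -1];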
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct pmm_stats pmm;
+};
+
+/*-----+-----------------------------------------------------------------------
+ * Chip | Number and | Ports in| Ports in|2 PHY-s |# of ports|# of engines
+ * | rate of physical | team #1 | team #2 |are used|per path | (paths)
+ * | ports | | | | |
+ *======+==================+=========+=========+========+======================
+ * BB     | 1x100G           | This is a special mode, where there are 2 HW functions
+ * BB | 2x10/20Gbps | 0,1 | NA | No | 1 | 1
+ * BB | 2x40 Gbps | 0,1 | NA | Yes | 1 | 1
+ * BB | 2x50Gbps | 0,1 | NA | No | 1 | 1
+ * BB | 4x10Gbps | 0,2 | 1,3 | No | 1/2 | 1,2
+ * BB | 4x10Gbps | 0,1 | 2,3 | No | 1/2 | 1,2
+ * BB | 4x10Gbps | 0,3 | 1,2 | No | 1/2 | 1,2
+ * BB | 4x10Gbps | 0,1,2,3 | NA | No | 1 | 1
+ * AH | 2x10/20Gbps | 0,1 | NA | NA | 1 | NA
+ * AH | 4x10Gbps | 0,1 | 2,3 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,2 | 1,3 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,3 | 1,2 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,1,2,3 | NA | NA | 1 | NA
+ *======+==================+=========+=========+========+=======================
+ */
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM (1 << 0)
+
+#define PORT_CMT_PORT_ROLE (1 << 1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE (1 << 1)
+
+#define PORT_CMT_TEAM_MASK (1 << 2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 (1 << 2)
+};
+
+/**************************************
+ * LLDP and DCBX HSI structures
+ **************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+
+typedef enum _lldp_agent_e {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+} lldp_agent_e;
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ /* Holds local Chassis ID TLV header, subtype and 9B of payload.
+	 * If the first byte is 0, the default chassis ID is used
+ */
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ /* Holds local Port ID TLV header, subtype and 9B of payload.
+	 * If the first byte is 0, the default port ID is used
+ */
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status; /* TBD */
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload.
+ */
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ /* Holds remote Port ID TLV header, subtype and 9B of payload.
+ */
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+ u32 pri_tc_tbl[1];
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+#define DCBX_CEE_STRICT_PRIORITY_TC 0x7
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+ /* Not in use
+ * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
+ * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ */
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+ /* PG feature */
+ struct dcbx_ets_feature ets;
+ /* PFC feature */
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ /* APP feature */
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000003
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ /*
+ * #define DCBX_CONFIG_VERSION_MASK 0x00000003
+ * #define DCBX_CONFIG_VERSION_SHIFT 0
+ * #define DCBX_CONFIG_VERSION_DISABLED 0
+ * #define DCBX_CONFIG_VERSION_IEEE 1
+ * #define DCBX_CONFIG_VERSION_CEE 2
+ */
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/* */
+/* P U B L I C G L O B A L */
+/* */
+/**************************************/
+struct public_global {
+	u32 max_path;  /* 32 bits is wasteful, but this will be used often */
+	u32 max_ports; /* (Global) 32 bits is wasteful, this will be used often */
+#define MODE_1P 1 /* TBD - NEED TO THINK OF A BETTER NAME */
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+};
+
+/**************************************/
+/* */
+/* P U B L I C P A T H */
+/* */
+/**************************************/
+
+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 128 bit: VF ack */
+/* 8 bit: ios_dis_ack */
+/* In order to maintain endianness in the mailbox HSI, we want to keep */
+/* using u32. The FW must have the VF right after the PF, since this is */
+/* how it accesses arrays (it always expects the VF to reside after the */
+/* PF, which makes the calculation much easier for it). */
+/* In order to answer both limitations, and keep the struct small, the code */
+/* will abuse the structure defined here to achieve the actual partition */
+/* above */
+/****************************************************************************/
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE 0
+#define ACCUM_ACK_PF_SHIFT 0
+
+#define ACCUM_ACK_VF_BASE 8
+#define ACCUM_ACK_VF_SHIFT 3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ /*
+	 * mcp_vf_disabled is set by the MCP to indicate to the driver
+	 * which VFs were disabled/FLRed
+ */
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; /* 0x003c */
+
+ u32 process_kill;
+	/* Reset on mcp reset, and incremented for every process kill event. */
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+/**************************************/
+/* */
+/* P U B L I C P O R T */
+/* */
+/**************************************/
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct dci_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct dci_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct dci_fc_npiv_tbl {
+ struct dci_fc_npiv_cfg fc_npiv_cfg;
+ struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+/****************************************************************************
+ * Driver <-> FW Mailbox *
+ ****************************************************************************/
+
+struct public_port {
+ u32 validity_map; /* 0x0 (4*2 = 0x8) */
+
+ /* validity bits */
+#define MCP_VALIDITY_PCI_CFG 0x00100000
+#define MCP_VALIDITY_MB 0x00200000
+#define MCP_VALIDITY_DEV_INFO 0x00400000
+#define MCP_VALIDITY_RESERVED 0x00000007
+
+ /* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 /* yaniv - tbd */
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+
+ /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr; /* Points to pmm_phy_cfg (For READ-ONLY) */
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET 0
+#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
+#define LFA_NO_REASON (0 << 0)
+#define LFA_LINK_DOWN (1 << 0)
+#define LFA_FORCE_INIT (1 << 1)
+#define LFA_LOOPBACK_MISMATCH (1 << 2)
+#define LFA_SPEED_MISMATCH (1 << 3)
+#define LFA_FLOW_CTRL_MISMATCH (1 << 4)
+#define LFA_ADV_SPEED_MISMATCH (1 << 5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET 16
+#define LINK_FLAP_COUNT_MASK 0x00ff0000
+
+ u32 link_change_count;
+
+ /* LLDP params */
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+
+	/* FC_NPIV table offset & size in NVRAM; a value of 0 means not present */
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+ u32 transceiver_data;
+#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF
+#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define PMM_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define PMM_TRANSCEIVER_STATE_VALID 0x00000003
+#define PMM_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define PMM_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define PMM_TRANSCEIVER_TYPE_SHIFT 0x00000008
+#define PMM_TRANSCEIVER_TYPE_NONE 0x00000000
+#define PMM_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
+#define PMM_TRANSCEIVER_TYPE_1G_PCC 0x01 /* 1G Passive copper cable */
+#define PMM_TRANSCEIVER_TYPE_1G_ACC 0x02 /* 1G Active copper cable */
+#define PMM_TRANSCEIVER_TYPE_1G_LX 0x03
+#define PMM_TRANSCEIVER_TYPE_1G_SX 0x04
+#define PMM_TRANSCEIVER_TYPE_10G_SR 0x05
+#define PMM_TRANSCEIVER_TYPE_10G_LR 0x06
+#define PMM_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define PMM_TRANSCEIVER_TYPE_10G_ER 0x08
+#define PMM_TRANSCEIVER_TYPE_10G_PCC 0x09 /* 10G Passive copper cable */
+#define PMM_TRANSCEIVER_TYPE_10G_ACC 0x0a /* 10G Active copper cable */
+#define PMM_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define PMM_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define PMM_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define PMM_TRANSCEIVER_TYPE_40G_CR4 0x0e
+#define PMM_TRANSCEIVER_TYPE_100G_AOC 0x0f /* Active optical cable */
+#define PMM_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define PMM_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define PMM_TRANSCEIVER_TYPE_100G_ER4 0x12
+#define PMM_TRANSCEIVER_TYPE_100G_ACC 0x13 /* Active copper cable */
+#define PMM_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define PMM_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_S 0x16
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_M 0x18
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_L 0x1a
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define PMM_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define PMM_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define PMM_TRANSCEIVER_TYPE_25G_AOC 0x1e
+
+#define PMM_TRANSCEIVER_TYPE_4x10G 0x1d
+#define PMM_TRANSCEIVER_TYPE_4x25G_CR 0x1e
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40GR 0x30
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+};
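+
+/* Illustrative sketch, not part of the HSI above: the speed/duplex is a
+ * subfield of link_status, so a reported speed is tested by masking and
+ * comparing against one of the LINK_STATUS_SPEED_AND_DUPLEX_* encodings.
+ * The helper name is hypothetical.
+ */
+static inline int link_status_is_40g(u32 link_status)
+{
+	if (!(link_status & LINK_STATUS_LINK_UP))
+		return 0;
+	return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
+	       LINK_STATUS_SPEED_AND_DUPLEX_40G;
+}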
+
+/**************************************/
+/* */
+/* P U B L I C F U N C */
+/* */
+/**************************************/
+
+struct public_func {
+ u32 dpdk_rsvd1[2];
+
+	/* MTU size per function is needed for the OV feature */
+ u32 mtu_size;
+	/* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+ /* For PCP values 0-3 use the map lower */
+ /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+ * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+ */
+ u32 c2s_pcp_map_lower;
+ /* For PCP values 4-7 use the map upper */
+ /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+ * 0x0000FF00 - PCP 6, 0x000000FF PCP 7
+ */
+ u32 c2s_pcp_map_upper;
+
+ /* For PCP default value get the MSB byte of the map default */
+ u32 c2s_pcp_map_default;
+
+ u32 reserved[4];
+
+ /* replace old mf_cfg */
+ u32 config;
+ /* E/R/I/D */
+ /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000000
+
+ /* MINBW, MAXBW */
+ /* value range - 0..100, increments in 1 % */
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
+
+ u32 mac_upper; /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 dpdk_rsvd2[4];
+
+ u32 ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation; /* vf per pf */
+
+	u32 preserve_data; /* Will be used by CCM */
+
+ u32 driver_last_activity_ts;
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+ * VFs
+ */
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+};
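+
+/* Illustrative sketch, not part of the HSI above: composing drv_id for a
+ * Linux driver that initializes the HW. The pda_comp_ver argument is
+ * hypothetical; the remaining values come from the defines above.
+ */
+static inline u32 drv_id_compose(u16 pda_comp_ver)
+{
+	return ((u32)pda_comp_ver << DRV_ID_PDA_COMP_VER_SHIFT) |
+	       DRV_ID_MCP_HSI_VER_CURRENT |
+	       DRV_ID_DRV_TYPE_LINUX |
+	       DRV_ID_DRV_INIT_HW_FLAG;
+}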
+
+/**************************************/
+/* */
+/* P U B L I C M B */
+/* */
+/**************************************/
+/* This is the only section that the driver can write to. */
+/* Each driver request to set feature parameters is done using a
+ * different command, which is linked to a specific data structure
+ * from the union below.
+ * For huge structures, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+ u32 mac_upper; /* Upper 16 bits are always zeroes */
+ u32 mac_lower;
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
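+
+/* Illustrative sketch, not part of the HSI above: filling drv_version_stc
+ * (e.g. for DRV_MSG_CODE_SET_VERSION). The name buffer holds
+ * MCP_DRV_VER_STR_SIZE - 4 bytes and is zero-padded here; the helper name
+ * is hypothetical.
+ */
+static inline void drv_version_fill(struct drv_version_stc *p, u32 version,
+				    const char *name)
+{
+	u32 i;
+
+	p->version = version;
+	for (i = 0; i < sizeof(p->name) && name[i] != '\0'; i++)
+		p->name[i] = (u8)name[i];
+	for (; i < sizeof(p->name); i++)
+		p->name[i] = 0;
+}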
+
+/* statistics for ncsi */
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 rserved;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+union drv_union_data {
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; /* LOAD_REQ */
+ struct mcp_mac wol_mac; /* UNLOAD_DONE */
+
+ struct pmm_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64; /* For PHY / AVS commands */
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ struct drv_version_stc drv_version;
+
+ struct lan_stats_stc lan_stats;
+ u32 dpdk_rsvd[3];
+ struct ocbb_data_stc ocbb_info;
+
+ /* ... */
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+ /* Params - FORCE - Reinitialize the link regardless of LFA */
+ /* - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+
+	/* LLDP commands */
+#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+ /* OneView feature driver HSI */
+#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
+#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
+#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
+#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
+#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
+#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
+
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+#define DRV_MSG_CODE_MCP_HALT 0x00100000
+#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
+#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
+
+#define DRV_MSG_CODE_SET_VMAC 0x00110000
+#define DRV_MSG_CODE_GET_VMAC 0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
+
+#define DRV_MSG_CODE_GET_STATS 0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+
+#define DRV_MSG_CODE_OCBB_DATA 0x00180000
+#define DRV_MSG_CODE_SET_BW 0x00190000
+#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
+#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000
+#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1)
+
+#define DRV_MSG_CODE_GPIO_READ 0x001c0000
+#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000
+
+#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+#define DRV_MSG_CODE_EMPTY_MB 0x00220000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 drv_mb_param;
+ /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+ /* UNLOAD_DONE_params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+ /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+ /* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
+#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
+#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
+#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
+
+#define DRV_MB_PARAM_PHYMOD_LANE_SHIFT 0
+#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF
+#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT 8
+#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00
+ /* configure vf MSIX params */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+	/* OneView configuration parameters */
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
+#define DRV_MB_PARAM_OV_CURR_CFG_VC_CLP 4
+#define DRV_MB_PARAM_OV_CURR_CFG_CNU 5
+#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6
+#define DRV_MB_PARAM_OV_CURR_CFG_HII 7
+
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT 0
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_TRARGET_FOUND (1 << 2)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_LOGGED_INTO_TGT (1 << 4)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_IMG_DOWNLOADED (1 << 5)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0
+
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT 0
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
+
+#define DRV_MB_PARAM_GPIO_NUMBER_SHIFT 0
+#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF
+#define DRV_MB_PARAM_GPIO_VALUE_SHIFT 16
+#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000
+
+ u32 fw_mb_header;
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
+#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE 0x26000000
+#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE 0x27000000
+#define FW_MSG_CODE_UPDATE_BOOT_PROGRESS_DONE 0x28000000
+#define FW_MSG_CODE_UPDATE_STORM_FW_VER_DONE 0x29000000
+#define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE 0x31000000
+#define FW_MSG_CODE_DRV_MSG_CODE_BW_UPDATE_DONE 0x32000000
+#define FW_MSG_CODE_DRV_MSG_CODE_MTU_SIZE_DONE 0x33000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_FLR_ACK 0x02000000
+#define FW_MSG_CODE_FLR_NACK 0x02100000
+#define FW_MSG_CODE_SET_DRIVER_DONE 0x02200000
+#define FW_MSG_CODE_SET_VMAC_SUCCESS 0x02300000
+#define FW_MSG_CODE_SET_VMAC_FAIL 0x02400000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_PHY_OK 0x00110000
+#define FW_MSG_CODE_PHY_ERROR 0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+#define FW_MSG_CODE_OK 0x00160000
+#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
+#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
+#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE 0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000
+#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE 0x000c0000
+#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH 0x00100000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
+#define FW_MSG_CODE_GPIO_OK 0x00160000
+#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
+#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000
+#define FW_MSG_CODE_GPIO_INVALID 0x000f0000
+#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
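+	/* e.g., assuming a 1-based month, 6 July 2016 encodes as
+	 * (2016 - 2001) * 12 * 32 + 7 * 32 + 6 = 5990.
+	 */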
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ /*
+	 * Indicates to the firmware not to go into OS-absent mode
+	 * when it is not getting a driver pulse.
+	 * This is used for debugging as well as for PXE (MBA).
+ */
+
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ /* Indicates to the driver not to assert due to lack
+ * of MCP response
+ */
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
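+
+/* Illustrative sketch, not part of the HSI above: a driver command is
+ * posted by writing the command code plus a rolling sequence number into
+ * drv_mb_header; the MCP is then polled until the same sequence comes
+ * back in fw_mb_header. The seq parameter and helper names are
+ * hypothetical.
+ */
+static inline u32 drv_mb_compose_header(u32 cmd, u16 *seq)
+{
+	*seq = (u16)((*seq + 1) & DRV_MSG_SEQ_NUMBER_MASK);
+	return (cmd & DRV_MSG_CODE_MASK) | *seq;
+}
+
+static inline int fw_mb_response_matches(u32 fw_mb_header, u16 seq)
+{
+	return (fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) == seq;
+}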
+
+/* MFW - DRV MB */
+/**********************************************************************
+ * Description
+ * Incremental Aggregative
+ * 8-bit MFW counter per message
+ * 8-bit ack-counter per message
+ * Capabilities
+ * Provides up to 256 aggregative messages per type
+ * Provides 4 message types per dword
+ * Each message type maps to a byte offset
+ * Backward compatibility by using sizeof for the counters.
+ * No lock required for 32bit messages
+ * Limitations:
+ * In case of messages greater than 32bit, a dedicated mechanism
+ * (e.g. a lock) is required to prevent data corruption.
+ **********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+#ifdef BIG_ENDIAN /* Like MFW */
+#define DRV_ACK_MSG(msg_p, msg_id) \
+((u8)(((u8 *)(msg_p))[msg_id]++))
+#else
+#define DRV_ACK_MSG(msg_p, msg_id) \
+((u8)(((u8 *)(msg_p))[(((msg_id) & ~3) | ((~(msg_id)) & 3))]++))
+#endif
+
+#define MFW_DRV_UPDATE(shmem_func, msg_id) \
+((u8)(((u8 *)(MFW_MB_P(shmem_func)->msg))[msg_id]++))
+
+struct public_mfw_mb {
+	u32 sup_msgs; /* Assigned with MFW_DRV_MSG_MAX */
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
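+
+/* Illustrative sketch, not part of the HSI above: a message of type
+ * msg_id is pending when its 8-bit MFW counter differs from the driver's
+ * 8-bit ack counter; both are located with the DWORD/OFFSET/MASK macros
+ * above. The helper name is hypothetical.
+ */
+static inline int mfw_mb_msg_pending(const struct public_mfw_mb *mb,
+				     int msg_id)
+{
+	u32 msg = mb->msg[MFW_DRV_MSG_DWORD(msg_id)];
+	u32 ack = mb->ack[MFW_DRV_MSG_DWORD(msg_id)];
+
+	return ((msg ^ ack) & MFW_DRV_MSG_MASK(msg_id)) != 0;
+}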
+
+/**************************************/
+/* */
+/* P U B L I C D A T A */
+/* */
+/**************************************/
+enum public_sections {
+ PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
+ PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+/* Runtime data needs about 1/2K. We use 2K to be on the safe side.
+ * Please make sure data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+struct drv_init_hw_stc {
+ u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];
+ u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+struct mcp_public_data {
+	/* The sections field is an array */
+ u32 num_sections;
+ offsize_t sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+#define I2C_TRANSCEIVER_ADDR 0xa0
+#define MAX_I2C_TRANSACTION_SIZE 16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256
+
+#endif /* MCP_PUBLIC_H */
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
new file mode 100644
index 00000000..7f1a60dd
--- /dev/null
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -0,0 +1,919 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/****************************************************************************
+ *
+ * Name: nvm_cfg.h
+ *
+ * Description: NVM config file - generated from the nvm cfg Excel sheet.
+ * DO NOT MODIFY !!!
+ *
+ * Created: 1/14/2016
+ *
+ ****************************************************************************/
+
+#ifndef NVM_CFG_H
+#define NVM_CFG_H
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+ u32 mac_addr_lo;
+};
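+
+/* Illustrative sketch, not part of the file above: reassembling the
+ * 6-byte MAC from the hi/lo words. The byte order assumed here (hi word
+ * carrying the two most significant octets) is an assumption consistent
+ * with the 16-bit HI mask; the helper name is hypothetical.
+ */
+static inline void nvm_cfg_mac_to_bytes(const struct nvm_cfg_mac_address *m,
+					u8 mac[6])
+{
+	u32 hi = m->mac_addr_hi & NVM_CFG_MAC_ADDRESS_HI_MASK;
+	u32 lo = m->mac_addr_lo;
+
+	mac[0] = (u8)(hi >> 8);
+	mac[1] = (u8)hi;
+	mac[2] = (u8)(lo >> 24);
+	mac[3] = (u8)(lo >> 16);
+	mac[4] = (u8)(lo >> 8);
+	mac[5] = (u8)lo;
+}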
+
+/******************************************
+ * nvm_cfg1 structs
+ ******************************************/
+struct nvm_cfg1_glob {
+ u32 generic_cont0; /* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+ u32 engineering_change[3]; /* 0x4 */
+ u32 manufacturing_id; /* 0x10 */
+ u32 serial_number[4]; /* 0x14 */
+ u32 pcie_cfg; /* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
+#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK 0x00000020
+#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET 5
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
+	/* Set the duration, in seconds, for which the fan failure signal
+	 * should be sampled
+ */
+#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK 0x80000000
+#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET 31
+ u32 mgmt_traffic; /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
+#define NVM_CFG1_GLOB_AUX_MODE_MASK 0x78000000
+#define NVM_CFG1_GLOB_AUX_MODE_OFFSET 27
+#define NVM_CFG1_GLOB_AUX_MODE_DEFAULT 0x0
+#define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY 0x1
+	/* Indicates whether an external thermal sensor is available */
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK 0x80000000
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET 31
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED 0x0
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED 0x1
+ u32 core_cfg; /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG 0x1
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP 0x2
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
+ u32 e_lane_cfg1; /* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+ u32 e_lane_cfg2; /* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
+#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET 12
+#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
+	/* Maximum advertised PCIe link width */
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK 0x000F0000
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET 16
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_16_LANES 0x0
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE 0x1
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES 0x2
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES 0x3
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES 0x4
+ /* ASPM L1 mode */
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK 0x00300000
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET 20
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED 0x0
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY 0x1
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK 0x01C00000
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET 22
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED 0x0
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK 0x06000000
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL 0x2
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH 0x3
+ /* Set the PLDM sensor modes */
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK 0x38000000
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET 27
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
+ u32 f_lane_cfg1; /* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+ u32 f_lane_cfg2; /* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+ /* Control the period between two successive checks */
+#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8
+ /* Set shutdown temperature */
+#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16
+ /* Set max. count for over operational temperature */
+#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000
+#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET 24
+ u32 eagle_preemphasis; /* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+ u32 eagle_driver_current; /* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+ u32 falcon_preemphasis; /* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+ u32 falcon_driver_current; /* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+ u32 pci_id; /* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
+ /* Set caution temperature */
+#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
+ /* Set external thermal sensor I2C address */
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK 0xFF000000
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24
+ u32 pci_subsys_id; /* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
+ u32 bar; /* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
+	/* Set the duration, in seconds, for which the fan failure signal
+	 * should be sampled
+ */
+#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK 0x0000F000
+#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET 12
+ u32 eagle_txfir_main; /* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+ u32 eagle_txfir_post; /* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+ u32 falcon_txfir_main; /* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+ u32 falcon_txfir_post; /* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+ u32 manufacture_ver; /* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+ u32 manufacture_time; /* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+ u32 led_global_settings; /* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+ u32 generic_cont1; /* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
+ u32 mbi_version; /* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+ u32 mbi_date; /* 0x80 */
+ u32 misc_sig; /* 0x84 */
+	/* Define the GPIO mapping used to switch the I2C mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+ u32 device_capabilities; /* 0x88 */
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+ u32 power_dissipated; /* 0x8C */
+#define NVM_CFG1_GLOB_POWER_DIS_D0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET 0
+#define NVM_CFG1_GLOB_POWER_DIS_D1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET 8
+#define NVM_CFG1_GLOB_POWER_DIS_D2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET 16
+#define NVM_CFG1_GLOB_POWER_DIS_D3_MASK 0xFF000000
+#define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET 24
+ u32 power_consumed; /* 0x90 */
+#define NVM_CFG1_GLOB_POWER_CONS_D0_MASK 0x000000FF
+#define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET 0
+#define NVM_CFG1_GLOB_POWER_CONS_D1_MASK 0x0000FF00
+#define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET 8
+#define NVM_CFG1_GLOB_POWER_CONS_D2_MASK 0x00FF0000
+#define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET 16
+#define NVM_CFG1_GLOB_POWER_CONS_D3_MASK 0xFF000000
+#define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET 24
+ u32 efi_version; /* 0x94 */
+ u32 reserved[42]; /* 0x98 */
+};
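+
+/* Illustrative sketch, not part of the file above: every nvm_cfg1 option
+ * decodes the same way, (reg & *_MASK) >> *_OFFSET. For example, reading
+ * the multi-function mode out of generic_cont0 (helper name hypothetical):
+ */
+static inline u32 nvm_cfg1_glob_mf_mode(const struct nvm_cfg1_glob *glob)
+{
+	/* Result compares against the NVM_CFG1_GLOB_MF_MODE_* values,
+	 * e.g. NVM_CFG1_GLOB_MF_MODE_DEFAULT.
+	 */
+	return (glob->generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+	       NVM_CFG1_GLOB_MF_MODE_OFFSET;
+}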
+
+struct nvm_cfg1_path {
+ u32 reserved[30]; /* 0x0 */
+};
+
+struct nvm_cfg1_port {
+ u32 reserved__m_relocated_to_option_123; /* 0x0 */
+ u32 reserved__m_relocated_to_option_124; /* 0x4 */
+ u32 generic_cont0; /* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
+#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+ u32 pcie_cfg; /* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
+ u32 features; /* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
+ u32 speed_cap_mask; /* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
+ u32 link_settings; /* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK 0x00018000
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET 15
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM 0x0
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM 0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000E0000
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_NONE 0x0
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_FIRECODE 0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_RS 0x2
+ u32 phy_cfg; /* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
+#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
+#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
+ u32 mgmt_traffic; /* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
+ u32 ext_phy; /* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+ u32 mba_cfg1; /* 0x28 */
+#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
+#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
+ u32 mba_cfg2; /* 0x2C */
+#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
+#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
+ u32 vf_cfg; /* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
+ struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
+ u32 led_port_settings; /* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
+ u32 transceiver_00; /* 0x40 */
+ /* Defines the GPIO mapping of the transceiver module-absent signal */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
+ /* Defines the GPIO mux settings that switch the I2C mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
+ u32 device_ids; /* 0x44 */
+#define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK 0x000000FF
+#define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK 0xFF000000
+#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET 24
+ u32 board_cfg; /* 0x48 */
+ /* This field defines the board technology
+ * (backplane, transceiver, external PHY)
+ */
+#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
+ /* This field defines the GPIO mapped to the SFP tx_disable signal */
+#define NVM_CFG1_PORT_TX_DISABLE_MASK 0x0000FF00
+#define NVM_CFG1_PORT_TX_DISABLE_OFFSET 8
+#define NVM_CFG1_PORT_TX_DISABLE_NA 0x0
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO0 0x1
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO1 0x2
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO2 0x3
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO3 0x4
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO4 0x5
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO5 0x6
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO6 0x7
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO7 0x8
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO8 0x9
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO9 0xA
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO10 0xB
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO11 0xC
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO12 0xD
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO13 0xE
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO14 0xF
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO15 0x10
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO16 0x11
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO17 0x12
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO18 0x13
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO19 0x14
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO20 0x15
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO21 0x16
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO22 0x17
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO23 0x18
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO24 0x19
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO25 0x1A
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO26 0x1B
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO27 0x1C
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO28 0x1D
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO29 0x1E
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO30 0x1F
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO31 0x20
+ u32 reserved[131]; /* 0x4C */
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address; /* 0x0 */
+ u32 rsrv1; /* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
+ u32 rsrv2; /* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
+ u32 device_id; /* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
+ u32 cmn_cfg; /* 0x14 */
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
+ u32 pci_cfg; /* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
+#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
+ u32 preboot_generic_cfg; /* 0x2C */
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK 0x0000FFFF
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
+ u32 reserved[8]; /* 0x30 */
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob; /* 0x0 */
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
+};
+
+/******************************************
+ * nvm_cfg structs
+ ******************************************/
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#endif /* NVM_CFG_H */
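The MASK/OFFSET pairs above follow a single bitfield convention: each named field lives inside one 32-bit configuration word, and reading it means masking the word and shifting right by the field's offset. A minimal sketch of that access pattern, assuming this header is included; nvm_cfg_get_field() and nvm_cfg_drv_link_speed() are illustrative helpers, not part of the imported file:

#include "nvm_cfg.h"

/* Isolate the field's bits, then shift them down to bit 0 */
static u32 nvm_cfg_get_field(u32 word, u32 mask, u32 offset)
{
	return (word & mask) >> offset;
}

/* Example: read the driver-requested link speed from link_settings;
 * a return value of NVM_CFG1_PORT_DRV_LINK_SPEED_25G (0x4) means 25G.
 */
static u32 nvm_cfg_drv_link_speed(const struct nvm_cfg1_port *port)
{
	return nvm_cfg_get_field(port->link_settings,
				 NVM_CFG1_PORT_DRV_LINK_SPEED_MASK,
				 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET);
}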
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
new file mode 100644
index 00000000..3b25e1a8
--- /dev/null
+++ b/drivers/net/qede/base/reg_addr.h
@@ -0,0 +1,1107 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+ 0
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
+ 0xfff << 0)
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+ 12
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
+ 0xfff << 12)
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+ 24
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
+ 0xff << 24)
+
+#define XSDM_REG_OPERATION_GEN \
+ 0xf80408UL
+#define NIG_REG_RX_BRB_OUT_EN \
+ 0x500e18UL
+#define NIG_REG_STORM_OUT_EN \
+ 0x500e08UL
+#define PSWRQ2_REG_L2P_VALIDATE_VFID \
+ 0x240c50UL
+#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
+ 0x2aae04UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x2aa16cUL
+#define BAR0_MAP_REG_MSDM_RAM \
+ 0x1d00000UL
+#define BAR0_MAP_REG_USDM_RAM \
+ 0x1d80000UL
+#define BAR0_MAP_REG_PSDM_RAM \
+ 0x1f00000UL
+#define BAR0_MAP_REG_TSDM_RAM \
+ 0x1c80000UL
+#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+ 0x5011f4UL
+#define PRS_REG_SEARCH_TCP \
+ 0x1f0400UL
+#define PRS_REG_SEARCH_UDP \
+ 0x1f0404UL
+#define PRS_REG_SEARCH_OPENFLOW \
+ 0x1f0434UL
+#define TM_REG_PF_ENABLE_CONN \
+ 0x2c043cUL
+#define TM_REG_PF_ENABLE_TASK \
+ 0x2c0444UL
+#define TM_REG_PF_SCAN_ACTIVE_CONN \
+ 0x2c04fcUL
+#define TM_REG_PF_SCAN_ACTIVE_TASK \
+ 0x2c0500UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define QM_REG_USG_CNT_PF_TX \
+ 0x2f2eacUL
+#define QM_REG_USG_CNT_PF_OTHER \
+ 0x2f2eb0UL
+#define DORQ_REG_PF_DB_ENABLE \
+ 0x100508UL
+#define QM_REG_PF_EN \
+ 0x2f2ea4UL
+#define TCFC_REG_STRONG_ENABLE_PF \
+ 0x2d0708UL
+#define CCFC_REG_STRONG_ENABLE_PF \
+ 0x2e0708UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+ 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+ 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+ 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+ 0x2aa410UL
+#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+ 0x2aa138UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x2aa174UL
+#define MISC_REG_GEN_PURP_CR0 \
+ 0x008c80UL
+#define MCP_REG_SCRATCH \
+ 0xe20000UL
+#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+ 0x218200UL
+#define MISCS_REG_CHIP_NUM \
+ 0x00976cUL
+#define MISCS_REG_CHIP_REV \
+ 0x009770UL
+#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
+ 0x00971cUL
+#define MISCS_REG_CHIP_TEST_REG \
+ 0x009778UL
+#define MISCS_REG_CHIP_METAL \
+ 0x009774UL
+#define BRB_REG_HEADER_SIZE \
+ 0x340804UL
+#define BTB_REG_HEADER_SIZE \
+ 0xdb0804UL
+#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
+ 0x1c0708UL
+#define CCFC_REG_ACTIVITY_COUNTER \
+ 0x2e8800UL
+#define CDU_REG_CID_ADDR_PARAMS \
+ 0x580900UL
+#define DBG_REG_CLIENT_ENABLE \
+ 0x010004UL
+#define DMAE_REG_INIT \
+ 0x00c000UL
+#define DORQ_REG_IFEN \
+ 0x100040UL
+#define GRC_REG_TIMEOUT_EN \
+ 0x050404UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x180040UL
+#define MCM_REG_INIT \
+ 0x1200000UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MISC_REG_PORT_MODE \
+ 0x008c00UL
+#define MISC_REG_BLOCK_256B_EN \
+ 0x008c14UL
+#define MISCS_REG_RESET_PL_HV \
+ 0x009060UL
+#define MISCS_REG_CLK_100G_MODE \
+ 0x009070UL
+#define MISCS_REG_RESET_PL_HV_2 \
+ 0x009150UL
+#define MSDM_REG_ENABLE_IN1 \
+ 0xfc0004UL
+#define MSEM_REG_ENABLE_IN \
+ 0x1800004UL
+#define NIG_REG_CM_HDR \
+ 0x500840UL
+#define NCSI_REG_CONFIG \
+ 0x040200UL
+#define PSWRQ2_REG_RBC_DONE \
+ 0x240000UL
+#define PSWRQ2_REG_CFG_DONE \
+ 0x240004UL
+#define PBF_REG_INIT \
+ 0xd80000UL
+#define PTU_REG_ATC_INIT_ARRAY \
+ 0x560000UL
+#define PCM_REG_INIT \
+ 0x1100000UL
+#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
+ 0x2a9000UL
+#define PRM_REG_DISABLE_PRM \
+ 0x230000UL
+#define PRS_REG_SOFT_RST \
+ 0x1f0000UL
+#define PSDM_REG_ENABLE_IN1 \
+ 0xfa0004UL
+#define PSEM_REG_ENABLE_IN \
+ 0x1600004UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ2_REG_CDUT_P_SIZE \
+ 0x24000cUL
+#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
+ 0x2a0040UL
+#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+ 0x29e050UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD2_REG_CONF11 \
+ 0x29d064UL
+#define PSWWR_REG_USDM_FULL_TH \
+ 0x29a040UL
+#define PSWWR2_REG_CDU_FULL_TH2 \
+ 0x29b040UL
+#define QM_REG_MAXPQSIZE_0 \
+ 0x2f0434UL
+#define RSS_REG_RSS_INIT_EN \
+ 0x238804UL
+#define RDIF_REG_STOP_ON_ERROR \
+ 0x300040UL
+#define SRC_REG_SOFT_RST \
+ 0x23874cUL
+#define TCFC_REG_ACTIVITY_COUNTER \
+ 0x2d8800UL
+#define TCM_REG_INIT \
+ 0x1180000UL
+#define TM_REG_PXP_READ_DATA_FIFO_INIT \
+ 0x2c0014UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0xfb0004UL
+#define TSEM_REG_ENABLE_IN \
+ 0x1700004UL
+#define TDIF_REG_STOP_ON_ERROR \
+ 0x310040UL
+#define UCM_REG_INIT \
+ 0x1280000UL
+#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+ 0x051004UL
+#define USDM_REG_ENABLE_IN1 \
+ 0xfd0004UL
+#define USEM_REG_ENABLE_IN \
+ 0x1900004UL
+#define XCM_REG_INIT \
+ 0x1000000UL
+#define XSDM_REG_ENABLE_IN1 \
+ 0xf80004UL
+#define XSEM_REG_ENABLE_IN \
+ 0x1400004UL
+#define YCM_REG_INIT \
+ 0x1080000UL
+#define YSDM_REG_ENABLE_IN1 \
+ 0xf90004UL
+#define YSEM_REG_ENABLE_IN \
+ 0x1500004UL
+#define XYLD_REG_SCBD_STRICT_PRIO \
+ 0x4c0000UL
+#define TMLD_REG_SCBD_STRICT_PRIO \
+ 0x4d0000UL
+#define MULD_REG_SCBD_STRICT_PRIO \
+ 0x4e0000UL
+#define YULD_REG_SCBD_STRICT_PRIO \
+ 0x4c8000UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0x008c20UL
+#define DMAE_REG_GO_C0 \
+ 0x00c048UL
+#define DMAE_REG_GO_C1 \
+ 0x00c04cUL
+#define DMAE_REG_GO_C2 \
+ 0x00c050UL
+#define DMAE_REG_GO_C3 \
+ 0x00c054UL
+#define DMAE_REG_GO_C4 \
+ 0x00c058UL
+#define DMAE_REG_GO_C5 \
+ 0x00c05cUL
+#define DMAE_REG_GO_C6 \
+ 0x00c060UL
+#define DMAE_REG_GO_C7 \
+ 0x00c064UL
+#define DMAE_REG_GO_C8 \
+ 0x00c068UL
+#define DMAE_REG_GO_C9 \
+ 0x00c06cUL
+#define DMAE_REG_GO_C10 \
+ 0x00c070UL
+#define DMAE_REG_GO_C11 \
+ 0x00c074UL
+#define DMAE_REG_GO_C12 \
+ 0x00c078UL
+#define DMAE_REG_GO_C13 \
+ 0x00c07cUL
+#define DMAE_REG_GO_C14 \
+ 0x00c080UL
+#define DMAE_REG_GO_C15 \
+ 0x00c084UL
+#define DMAE_REG_GO_C16 \
+ 0x00c088UL
+#define DMAE_REG_GO_C17 \
+ 0x00c08cUL
+#define DMAE_REG_GO_C18 \
+ 0x00c090UL
+#define DMAE_REG_GO_C19 \
+ 0x00c094UL
+#define DMAE_REG_GO_C20 \
+ 0x00c098UL
+#define DMAE_REG_GO_C21 \
+ 0x00c09cUL
+#define DMAE_REG_GO_C22 \
+ 0x00c0a0UL
+#define DMAE_REG_GO_C23 \
+ 0x00c0a4UL
+#define DMAE_REG_GO_C24 \
+ 0x00c0a8UL
+#define DMAE_REG_GO_C25 \
+ 0x00c0acUL
+#define DMAE_REG_GO_C26 \
+ 0x00c0b0UL
+#define DMAE_REG_GO_C27 \
+ 0x00c0b4UL
+#define DMAE_REG_GO_C28 \
+ 0x00c0b8UL
+#define DMAE_REG_GO_C29 \
+ 0x00c0bcUL
+#define DMAE_REG_GO_C30 \
+ 0x00c0c0UL
+#define DMAE_REG_GO_C31 \
+ 0x00c0c4UL
+#define DMAE_REG_CMD_MEM \
+ 0x00c800UL
+#define QM_REG_MAXPQSIZETXSEL_0 \
+ 0x2f0440UL
+#define QM_REG_SDMCMDREADY \
+ 0x2f1e10UL
+#define QM_REG_SDMCMDADDR \
+ 0x2f1e04UL
+#define QM_REG_SDMCMDDATALSB \
+ 0x2f1e08UL
+#define QM_REG_SDMCMDDATAMSB \
+ 0x2f1e0cUL
+#define QM_REG_SDMCMDGO \
+ 0x2f1e14UL
+#define QM_REG_RLPFCRD \
+ 0x2f4d80UL
+#define QM_REG_RLPFINCVAL \
+ 0x2f4c80UL
+#define QM_REG_RLGLBLCRD \
+ 0x2f4400UL
+#define QM_REG_RLGLBLINCVAL \
+ 0x2f3400UL
+#define IGU_REG_ATTENTION_ENABLE \
+ 0x18083cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x180820UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x180824UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0x008400UL
+#define CAU_REG_SB_ADDR_MEMORY \
+ 0x1c8000UL
+#define CAU_REG_SB_VAR_MEMORY \
+ 0x1c6000UL
+#define CAU_REG_PI_MEMORY \
+ 0x1d0000UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x180800UL
+#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+ 0x00849cUL
+#define MISC_REG_AEU_MASK_ATTN_IGU \
+ 0x008494UL
+#define IGU_REG_CLEANUP_STATUS_0 \
+ 0x180980UL
+#define IGU_REG_CLEANUP_STATUS_1 \
+ 0x180a00UL
+#define IGU_REG_CLEANUP_STATUS_2 \
+ 0x180a80UL
+#define IGU_REG_CLEANUP_STATUS_3 \
+ 0x180b00UL
+#define IGU_REG_CLEANUP_STATUS_4 \
+ 0x180b80UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x180840UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x180848UL
+#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
+ 0x1 << 1)
+#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
+ 0x1 << 0)
+#define IGU_REG_MAPPING_MEMORY \
+ 0x184000UL
+#define MISCS_REG_GENERIC_POR_0 \
+ 0x0096d4UL
+#define MCP_REG_NVM_CFG4 \
+ 0xe0642cUL
+#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
+ 0x7 << 0)
+#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+ 0
+#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL
+#define CNIG_REG_PMEG_IF_CMD_BB_B0 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB_B0 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB_B0 0x218228UL
+#define NWM_REG_MAC0 0x800400UL
+#define NWM_REG_MAC0_SIZE 256
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG 0x000008UL
+#define MISC_REG_RESET_PL_PDA_VAUX 0x008090UL
+#define MISC_REG_XMAC_CORE_PORT_MODE 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE 0x008c04UL
+#define XMAC_REG_MODE 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE 0x210040UL
+#define XMAC_REG_TX_CTRL_LO 0x210020UL
+#define XMAC_REG_CTRL 0x210000UL
+#define XMAC_REG_RX_CTRL 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1 << 12)
+#define MISC_REG_CLK_100G_MODE 0x008c10UL
+#define MISC_REG_OPTE_MODE 0x008c0cUL
+#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define PRS_REG_SEARCH_TAG1 0x1f0444UL
+#define PRS_REG_SEARCH_TCP_FIRST_FRAG 0x1f0410UL
+#define MISCS_REG_PLL_MAIN_CTRL_4 0x00974cUL
+#define MISCS_REG_ECO_RESERVED 0x0097b4UL
+#define PGLUE_B_REG_PF_BAR0_SIZE 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE 0x2aae64UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE 0x501ac0UL
+#define XMAC_REG_CTRL_TX_EN (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN (0x1 << 1)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define PSWRQ2_REG_ILT_MEMORY 0x260000UL
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_0 0x50160cUL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_0 0x501f88UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_1 0x501610UL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_1 0x501f8cUL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 0x5015e4UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0 0x501f58UL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 0x5015e8UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 0x501f5cUL
+#define NIG_REG_LB_ARB_CLIENT_IS_STRICT 0x5015c0UL
+#define NIG_REG_TX_ARB_CLIENT_IS_STRICT 0x501f34UL
+#define NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ 0x5015c4UL
+#define NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x501f38UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL 0x501f1cUL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD 0x501f20UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE 0x501f24UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE 0x501f28UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_LB_BRBRATELIMIT_CTRL 0x50150cUL
+#define NIG_REG_LB_BRBRATELIMIT_INC_PERIOD 0x501510UL
+#define NIG_REG_LB_BRBRATELIMIT_INC_VALUE 0x501514UL
+#define NIG_REG_LB_BRBRATELIMIT_MAX_VALUE 0x501518UL
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT 1
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0 0x501520UL
+#define NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 0x501540UL
+#define NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 0x501560UL
+#define NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 0x501580UL
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT 0
+#define NIG_REG_PRIORITY_FOR_TC_0 0x501bccUL
+#define NIG_REG_RX_TC0_PRIORITY_MASK 0x501becUL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 0x1f0540UL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 0x1f0534UL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 0x1f053cUL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 0x1f0530UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_STRICT 0x1f0514UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ 0x1f0518UL
+#define BRB_REG_TOTAL_MAC_SIZE 0x3408c0UL
+#define BRB_REG_SHARED_HR_AREA 0x340880UL
+#define BRB_REG_TC_GUARANTIED_0 0x340900UL
+#define BRB_REG_MAIN_TC_GUARANTIED_HYST_0 0x340978UL
+#define BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 0x340c60UL
+#define BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 0x340d38UL
+#define BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 0x340ab0UL
+#define BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 0x340b88UL
+#define BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 0x340c00UL
+#define BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 0x340cd8UL
+#define BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 0x340a50UL
+#define BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 0x340b28UL
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define NIG_REG_PKT_PRIORITY_TO_TC 0x501ba4UL
+#define PGLUE_B_REG_START_INIT_PTT_GTT 0x2a8008UL
+#define PGLUE_B_REG_INIT_DONE_PTT_GTT 0x2a800cUL
+#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL
+#define MCP_REG_CPU_STATE 0xe05004UL
+#define MCP_REG_CPU_MODE 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT (0x1 << 10)
+#define MCP_REG_CPU_EVENT_MASK 0xe05008UL
+#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL
+#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL
+#define PSWHST_REG_VF_DISABLED_ERROR_DATA 0x2a005cUL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH 0x2a006cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 0x050050UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 0x2aae80UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL
+#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1 << 10)
+#define DORQ_REG_DB_DROP_REASON 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL
+#define TM_REG_INT_STS_1 0x2c0190UL
+#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1 << 6)
+#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_MASK_1 0x2c0194UL
+#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1 << 6)
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL
+#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL
+#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL
+#define YSEM_REG_FAST_MEMORY 0x1540000UL
+#define NIG_REG_FLOWCTRL_MODE 0x501ba0UL
+#define TSEM_REG_FAST_MEMORY 0x1740000UL
+#define TSEM_REG_DBG_FRAME_MODE 0x1701408UL
+#define TSEM_REG_SLOW_DBG_ACTIVE 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE 0x1701404UL
+#define TSEM_REG_DBG_MODE1_CFG 0x1701420UL
+#define TSEM_REG_SYNC_DBG_EMPTY 0x1701160UL
+#define TSEM_REG_SLOW_DBG_EMPTY 0x1701140UL
+#define TCM_REG_CTX_RBC_ACCS 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX 0x11814c4UL
+#define TCM_REG_SM_CON_CTX 0x11814ccUL
+#define TCM_REG_AGG_TASK_CTX 0x11814c8UL
+#define TCM_REG_SM_TASK_CTX 0x11814d0UL
+#define MSEM_REG_FAST_MEMORY 0x1840000UL
+#define MSEM_REG_DBG_FRAME_MODE 0x1801408UL
+#define MSEM_REG_SLOW_DBG_ACTIVE 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE 0x1801404UL
+#define MSEM_REG_DBG_MODE1_CFG 0x1801420UL
+#define MSEM_REG_SYNC_DBG_EMPTY 0x1801160UL
+#define MSEM_REG_SLOW_DBG_EMPTY 0x1801140UL
+#define MCM_REG_CTX_RBC_ACCS 0x1201800UL
+#define MCM_REG_AGG_CON_CTX 0x1201804UL
+#define MCM_REG_SM_CON_CTX 0x120180cUL
+#define MCM_REG_AGG_TASK_CTX 0x1201808UL
+#define MCM_REG_SM_TASK_CTX 0x1201810UL
+#define USEM_REG_FAST_MEMORY 0x1940000UL
+#define USEM_REG_DBG_FRAME_MODE 0x1901408UL
+#define USEM_REG_SLOW_DBG_ACTIVE 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE 0x1901404UL
+#define USEM_REG_DBG_MODE1_CFG 0x1901420UL
+#define USEM_REG_SYNC_DBG_EMPTY 0x1901160UL
+#define USEM_REG_SLOW_DBG_EMPTY 0x1901140UL
+#define UCM_REG_CTX_RBC_ACCS 0x1281700UL
+#define UCM_REG_AGG_CON_CTX 0x1281704UL
+#define UCM_REG_SM_CON_CTX 0x128170cUL
+#define UCM_REG_AGG_TASK_CTX 0x1281708UL
+#define UCM_REG_SM_TASK_CTX 0x1281710UL
+#define XSEM_REG_FAST_MEMORY 0x1440000UL
+#define XSEM_REG_DBG_FRAME_MODE 0x1401408UL
+#define XSEM_REG_SLOW_DBG_ACTIVE 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE 0x1401404UL
+#define XSEM_REG_DBG_MODE1_CFG 0x1401420UL
+#define XSEM_REG_SYNC_DBG_EMPTY 0x1401160UL
+#define XSEM_REG_SLOW_DBG_EMPTY 0x1401140UL
+#define XCM_REG_CTX_RBC_ACCS 0x1001800UL
+#define XCM_REG_AGG_CON_CTX 0x1001804UL
+#define XCM_REG_SM_CON_CTX 0x1001808UL
+#define YSEM_REG_DBG_FRAME_MODE 0x1501408UL
+#define YSEM_REG_SLOW_DBG_ACTIVE 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE 0x1501404UL
+#define YSEM_REG_DBG_MODE1_CFG 0x1501420UL
+#define YSEM_REG_SYNC_DBG_EMPTY 0x1501160UL
+#define YCM_REG_CTX_RBC_ACCS 0x1081800UL
+#define YCM_REG_AGG_CON_CTX 0x1081804UL
+#define YCM_REG_SM_CON_CTX 0x108180cUL
+#define YCM_REG_AGG_TASK_CTX 0x1081808UL
+#define YCM_REG_SM_TASK_CTX 0x1081810UL
+#define PSEM_REG_FAST_MEMORY 0x1640000UL
+#define PSEM_REG_DBG_FRAME_MODE 0x1601408UL
+#define PSEM_REG_SLOW_DBG_ACTIVE 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE 0x1601404UL
+#define PSEM_REG_DBG_MODE1_CFG 0x1601420UL
+#define PSEM_REG_SYNC_DBG_EMPTY 0x1601160UL
+#define PSEM_REG_SLOW_DBG_EMPTY 0x1601140UL
+#define PCM_REG_CTX_RBC_ACCS 0x1101440UL
+#define PCM_REG_SM_CON_CTX 0x1101444UL
+#define GRC_REG_DBG_SELECT 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE 0x0500a8UL
+#define GRC_REG_DBG_SHIFT 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME 0x0500b4UL
+#define PGLUE_B_REG_DBG_SELECT 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME 0x2a8410UL
+#define CNIG_REG_DBG_SELECT_K2 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 0x218264UL
+#define NCSI_REG_DBG_SELECT 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE 0x040478UL
+#define NCSI_REG_DBG_SHIFT 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME 0x040484UL
+#define BMB_REG_DBG_SELECT 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE 0x540a80UL
+#define BMB_REG_DBG_SHIFT 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME 0x540a8cUL
+#define PCIE_REG_DBG_SELECT 0x0547e8UL
+#define PHY_PCIE_REG_DBG_SELECT 0x629fe8UL
+#define PCIE_REG_DBG_DWORD_ENABLE 0x0547ecUL
+#define PHY_PCIE_REG_DBG_DWORD_ENABLE 0x629fecUL
+#define PCIE_REG_DBG_SHIFT 0x0547f0UL
+#define PHY_PCIE_REG_DBG_SHIFT 0x629ff0UL
+#define PCIE_REG_DBG_FORCE_VALID 0x0547f4UL
+#define PHY_PCIE_REG_DBG_FORCE_VALID 0x629ff4UL
+#define PCIE_REG_DBG_FORCE_FRAME 0x0547f8UL
+#define PHY_PCIE_REG_DBG_FORCE_FRAME 0x629ff8UL
+#define MCP2_REG_DBG_SELECT 0x052400UL
+#define MCP2_REG_DBG_SHIFT 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME 0x052444UL
+#define PSWHST_REG_DBG_SELECT 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME 0x2a0110UL
+#define PSWHST2_REG_DBG_SELECT 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME 0x29e068UL
+#define PSWRD_REG_DBG_DWORD_ENABLE 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME 0x29d410UL
+#define PSWWR_REG_DBG_SELECT 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME 0x29a094UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME 0x280030UL
+#define PSWRQ2_REG_DBG_SELECT 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME 0x240110UL
+#define PGLCS_REG_DBG_SELECT 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME 0x001d24UL
+#define PTU_REG_DBG_SELECT 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE 0x560104UL
+#define PTU_REG_DBG_SHIFT 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME 0x560110UL
+#define DMAE_REG_DBG_SELECT 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE 0x00c514UL
+#define DMAE_REG_DBG_SHIFT 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME 0x00c520UL
+#define TCM_REG_DBG_SELECT 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE 0x1180044UL
+#define TCM_REG_DBG_SHIFT 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME 0x1180050UL
+#define MCM_REG_DBG_SELECT 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE 0x1200044UL
+#define MCM_REG_DBG_SHIFT 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME 0x1200050UL
+#define UCM_REG_DBG_SELECT 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE 0x1280054UL
+#define UCM_REG_DBG_SHIFT 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME 0x1280060UL
+#define XCM_REG_DBG_SELECT 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE 0x1000044UL
+#define XCM_REG_DBG_SHIFT 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME 0x1000050UL
+#define YCM_REG_DBG_SELECT 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE 0x1080044UL
+#define YCM_REG_DBG_SHIFT 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME 0x1080050UL
+#define PCM_REG_DBG_SELECT 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE 0x1100044UL
+#define PCM_REG_DBG_SHIFT 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME 0x1100050UL
+#define QM_REG_DBG_SELECT 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE 0x2f2e78UL
+#define QM_REG_DBG_SHIFT 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME 0x2f2e84UL
+#define TM_REG_DBG_SELECT 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE 0x2c07acUL
+#define TM_REG_DBG_SHIFT 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME 0x2c07b8UL
+#define DORQ_REG_DBG_SELECT 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME 0x100ae0UL
+#define BRB_REG_DBG_SELECT 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE 0x340ed4UL
+#define BRB_REG_DBG_SHIFT 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME 0x340ee0UL
+#define SRC_REG_DBG_SELECT 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE 0x238704UL
+#define SRC_REG_DBG_SHIFT 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME 0x238710UL
+#define PRS_REG_DBG_SELECT 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME 0x1f0ba4UL
+#define TSDM_REG_DBG_SELECT 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME 0xfc0e38UL
+#define USDM_REG_DBG_SELECT 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME 0xfd0e38UL
+#define XSDM_REG_DBG_SELECT 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME 0xf80e38UL
+#define YSDM_REG_DBG_SELECT 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME 0xf90e38UL
+#define PSDM_REG_DBG_SELECT 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME 0xfa0e38UL
+#define TSEM_REG_DBG_SELECT 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE 0x170152cUL
+#define TSEM_REG_DBG_SHIFT 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME 0x1701538UL
+#define MSEM_REG_DBG_SELECT 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE 0x180152cUL
+#define MSEM_REG_DBG_SHIFT 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME 0x1801538UL
+#define USEM_REG_DBG_SELECT 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE 0x190152cUL
+#define USEM_REG_DBG_SHIFT 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME 0x1901538UL
+#define XSEM_REG_DBG_SELECT 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE 0x140152cUL
+#define XSEM_REG_DBG_SHIFT 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME 0x1401538UL
+#define YSEM_REG_DBG_SELECT 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE 0x150152cUL
+#define YSEM_REG_DBG_SHIFT 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME 0x1501538UL
+#define PSEM_REG_DBG_SELECT 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE 0x160152cUL
+#define PSEM_REG_DBG_SHIFT 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME 0x1601538UL
+#define RSS_REG_DBG_SELECT 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE 0x238c50UL
+#define RSS_REG_DBG_SHIFT 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME 0x238c5cUL
+#define TMLD_REG_DBG_SELECT 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME 0x4d1610UL
+#define MULD_REG_DBG_SELECT 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE 0x4e1604UL
+#define MULD_REG_DBG_SHIFT 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME 0x4e1610UL
+#define YULD_REG_DBG_SELECT 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE 0x4c9604UL
+#define YULD_REG_DBG_SHIFT 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME 0x4c9610UL
+#define XYLD_REG_DBG_SELECT 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME 0x4c1610UL
+#define PRM_REG_DBG_SELECT 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE 0x2306acUL
+#define PRM_REG_DBG_SHIFT 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME 0x2306b8UL
+#define PBF_PB1_REG_DBG_SELECT 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME 0xda4738UL
+#define RPB_REG_DBG_SELECT 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE 0x23c72cUL
+#define RPB_REG_DBG_SHIFT 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME 0x23c738UL
+#define BTB_REG_DBG_SELECT 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME 0xdb08d8UL
+#define PBF_REG_DBG_SELECT 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE 0xd80064UL
+#define PBF_REG_DBG_SHIFT 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME 0xd80070UL
+#define RDIF_REG_DBG_SELECT 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE 0x300504UL
+#define RDIF_REG_DBG_SHIFT 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME 0x300510UL
+#define TDIF_REG_DBG_SELECT 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE 0x310504UL
+#define TDIF_REG_DBG_SHIFT 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME 0x310510UL
+#define CDU_REG_DBG_SELECT 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE 0x580708UL
+#define CDU_REG_DBG_SHIFT 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME 0x580714UL
+#define CCFC_REG_DBG_SELECT 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME 0x2e0510UL
+#define TCFC_REG_DBG_SELECT 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME 0x2d0510UL
+#define IGU_REG_DBG_SELECT 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE 0x18157cUL
+#define IGU_REG_DBG_SHIFT 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME 0x181588UL
+#define CAU_REG_DBG_SELECT 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME 0x1c0eb8UL
+#define UMAC_REG_DBG_SELECT 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE 0x051098UL
+#define UMAC_REG_DBG_SHIFT 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME 0x0510a4UL
+#define NIG_REG_DBG_SELECT 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE 0x502144UL
+#define NIG_REG_DBG_SHIFT 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME 0x502150UL
+#define WOL_REG_DBG_SELECT 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE 0x600144UL
+#define WOL_REG_DBG_SHIFT 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME 0x600150UL
+#define BMBN_REG_DBG_SELECT 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE 0x610144UL
+#define BMBN_REG_DBG_SHIFT 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME 0x610150UL
+#define NWM_REG_DBG_SELECT 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE 0x8000f0UL
+#define NWM_REG_DBG_SHIFT 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME 0x8000fcUL
+#define BRB_REG_BIG_RAM_ADDRESS 0x340800UL
+#define BRB_REG_BIG_RAM_DATA 0x341500UL
+#define BTB_REG_BIG_RAM_ADDRESS 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS 0x540800UL
+#define BMB_REG_BIG_RAM_DATA 0x540f00UL
+#define MISCS_REG_RESET_PL_UA 0x009050UL
+#define MISC_REG_RESET_PL_UA 0x008050UL
+#define MISC_REG_RESET_PL_HV 0x008060UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 0x008080UL
+#define SEM_FAST_REG_INT_RAM 0x020000UL
+#define DBG_REG_DBG_BLOCK_ON 0x010454UL
+#define DBG_REG_FRAMING_MODE 0x010058UL
+#define SEM_FAST_REG_DEBUG_MODE 0x000744UL
+#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL
+#define SEM_FAST_REG_FILTER_CID 0x000754UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_STRT 0x000760UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_END 0x000764UL
+#define SEM_FAST_REG_FILTER_EVENT_ID 0x000758UL
+#define SEM_FAST_REG_EVENT_ID_MASK 0x00075cUL
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL
+#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
+#define DBG_REG_FILTER_ENABLE 0x0109d0UL
+#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
+#define DBG_REG_FILTER_CNSTR_OPRTN_0 0x010a28UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0 0x01071cUL
+#define DBG_REG_FILTER_CNSTR_DATA_0 0x0109d8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0 0x01059cUL
+#define DBG_REG_FILTER_CNSTR_DATA_MASK_0 0x0109f8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0 0x01065cUL
+#define DBG_REG_FILTER_CNSTR_FRAME_0 0x0109e8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0 0x0105fcUL
+#define DBG_REG_FILTER_CNSTR_FRAME_MASK_0 0x010a08UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0 0x0106bcUL
+#define DBG_REG_FILTER_CNSTR_OFFSET_0 0x010a18UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0 0x0107dcUL
+#define DBG_REG_FILTER_CNSTR_RANGE_0 0x010a38UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0 0x01077cUL
+#define DBG_REG_FILTER_CNSTR_CYCLIC_0 0x010a68UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0 0x0108fcUL
+#define DBG_REG_FILTER_CNSTR_MUST_0 0x010a48UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0 0x01083cUL
+#define DBG_REG_INTR_BUFFER 0x014000UL
+#define DBG_REG_INTR_BUFFER_WR_PTR 0x010404UL
+#define DBG_REG_WRAP_ON_INT_BUFFER 0x010418UL
+#define DBG_REG_INTR_BUFFER_RD_PTR 0x010400UL
+#define DBG_REG_EXT_BUFFER_WR_PTR 0x010410UL
+#define DBG_REG_WRAP_ON_EXT_BUFFER 0x01041cUL
+#define SEM_FAST_REG_STALL_0 0x000488UL
+#define SEM_FAST_REG_STALLED 0x000494UL
+#define SEM_FAST_REG_STORM_REG_FILE 0x008000UL
+#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL
+#define RSS_REG_RSS_RAM_ADDR 0x238c30UL
+#define RSS_REG_RSS_RAM_DATA 0x238c20UL
+#define MISCS_REG_BLOCK_256B_EN 0x009074UL
+#define MCP_REG_CPU_REG_FILE 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE 32
+#define DBG_REG_CALENDAR_OUT_DATA 0x010480UL
+#define DBG_REG_FULL_MODE 0x010060UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB 0x010430UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB 0x010434UL
+#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
+#define DBG_REG_PCI_EXT_BUFFER_SIZE 0x010438UL
+#define DBG_REG_PCI_FUNC_NUM 0x010a98UL
+#define DBG_REG_PCI_LOGIC_ADDR 0x010460UL
+#define DBG_REG_PCI_REQ_CREDIT 0x010440UL
+#define DBG_REG_DEBUG_TARGET 0x01005cUL
+#define DBG_REG_OUTPUT_ENABLE 0x01000cUL
+#define DBG_REG_OTHER_ENGINE_MODE 0x010010UL
+#define NIG_REG_DEBUG_PORT 0x5020d0UL
+#define DBG_REG_ETHERNET_HDR_WIDTH 0x010b38UL
+#define DBG_REG_ETHERNET_HDR_7 0x010b34UL
+#define DBG_REG_ETHERNET_HDR_6 0x010b30UL
+#define DBG_REG_ETHERNET_HDR_5 0x010b2cUL
+#define DBG_REG_ETHERNET_HDR_4 0x010b28UL
+#define DBG_REG_NIG_DATA_LIMIT_SIZE 0x01043cUL
+#define DBG_REG_TIMESTAMP_FRAME_EN 0x010b54UL
+#define DBG_REG_TIMESTAMP_TICK 0x010b50UL
+#define DBG_REG_FILTER_ID_NUM 0x0109d4UL
+#define DBG_REG_FILTER_MSG_LENGTH_ENABLE 0x010a78UL
+#define DBG_REG_FILTER_MSG_LENGTH 0x010a7cUL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS 0x010a90UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES 0x010a94UL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE 0x010a88UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE 0x010a8cUL
+#define DBG_REG_TRIGGER_STATE_ID_0 0x010554UL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 0x01095cUL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 0x010968UL
+#define DBG_REG_TRIGGER_STATE_SET_COUNT_0 0x010584UL
+#define DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 0x01056cUL
+#define DBG_REG_NO_GRANT_ON_FULL 0x010458UL
+#define DBG_REG_STORM_ID_NUM 0x010b14UL
+#define DBG_REG_CALENDAR_SLOT0 0x010014UL
+#define DBG_REG_HW_ID_NUM 0x010b10UL
+#define DBG_REG_TIMESTAMP 0x010b4cUL
+#define DBG_REG_CPU_TIMEOUT 0x010450UL
+#define DBG_REG_TRIGGER_STATUS_CUR_STATE 0x010b60UL
+#define GRC_REG_TRACE_FIFO_VALID_DATA 0x050064UL
+#define GRC_REG_TRACE_FIFO 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x181530UL
+#define IGU_REG_ERROR_HANDLING_MEMORY 0x181520UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW 0x050500UL
+#define TSEM_REG_VF_ERROR 0x1700408UL
+#define USEM_REG_VF_ERROR 0x1900408UL
+#define MSEM_REG_VF_ERROR 0x1800408UL
+#define XSEM_REG_VF_ERROR 0x1400408UL
+#define YSEM_REG_VF_ERROR 0x1500408UL
+#define PSEM_REG_VF_ERROR 0x1600408UL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x2aa118UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT 0x180408UL
+#define IGU_REG_VF_CONFIGURATION 0x180804UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE 0x2a0800UL
+#define DORQ_REG_VF_USAGE_CNT 0x1009c4UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 0xd806ccUL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 0xd806c8UL
+#define PRS_REG_MSG_CT_MAIN_0 0x1f0a24UL
+#define PRS_REG_MSG_CT_LB_0 0x1f0a28UL
+#define BRB_REG_PER_TC_COUNTERS 0x341a00UL
+
+/* Additional register definitions */
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define MISCS_REG_FUNCTION_HIDE 0x0096f0UL
+#define PCIE_REG_PRTY_MASK 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE 0x2aaeb4UL
+#define BAR0_MAP_REG_YSDM_RAM 0x1e80000UL
+#define SEM_FAST_REG_INT_RAM_SIZE 20480
+#define MCP_REG_SCRATCH_SIZE 57344
+
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL
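Nearly every block above exposes the same five debug-bus registers (DBG_SELECT, DBG_DWORD_ENABLE, DBG_SHIFT, DBG_FORCE_VALID, DBG_FORCE_FRAME), only at block-specific addresses, so a debug tool can drive them table-first instead of open-coding each block. A sketch of that approach, assuming ecore_wr() is the driver's 32-bit register-write helper; the struct and function below are illustrative, not part of the imported file:

struct block_dbg_regs {
	u32 select;		/* <BLOCK>_REG_DBG_SELECT */
	u32 dword_enable;	/* <BLOCK>_REG_DBG_DWORD_ENABLE */
	u32 shift;		/* <BLOCK>_REG_DBG_SHIFT */
	u32 force_valid;	/* <BLOCK>_REG_DBG_FORCE_VALID */
	u32 force_frame;	/* <BLOCK>_REG_DBG_FORCE_FRAME */
};

static const struct block_dbg_regs grc_dbg = {
	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE, GRC_REG_DBG_SHIFT,
	GRC_REG_DBG_FORCE_VALID, GRC_REG_DBG_FORCE_FRAME,
};

/* Route one debug line from a block onto the debug bus, with all
 * four dwords enabled and no shift applied.
 */
static void block_dbg_select_line(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  const struct block_dbg_regs *regs,
				  u32 line)
{
	ecore_wr(p_hwfn, p_ptt, regs->select, line);
	ecore_wr(p_hwfn, p_ptt, regs->dword_enable, 0xf);
	ecore_wr(p_hwfn, p_ptt, regs->shift, 0);
}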
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
new file mode 100644
index 00000000..b6f6487d
--- /dev/null
+++ b/drivers/net/qede/qede_eth_if.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_ethdev.h"
+
+static int
+qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
+{
+ int rc, i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+ u8 tx_switching = 0;
+ struct ecore_sp_vport_start_params start = { 0 };
+
+ start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO :
+ ECORE_TPA_MODE_NONE;
+ start.remove_inner_vlan = p_params->remove_inner_vlan;
+ start.tx_switching = tx_switching;
+ start.only_untagged = false; /* accept tagged and untagged */
+ start.drop_ttl0 = p_params->drop_ttl0;
+ start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ start.handle_ptp_pkts = p_params->handle_ptp_pkts;
+ start.vport_id = p_params->vport_id;
+ start.max_buffers_per_cqe = 16; /* TODO: is this right? */
+ start.mtu = p_params->mtu;
+ /* @DPDK - Disable FW placement */
+ start.zero_placement_offset = 1;
+
+ rc = ecore_sp_vport_start(p_hwfn, &start);
+ if (rc) {
+ DP_ERR(edev, "Failed to start VPORT\n");
+ return rc;
+ }
+
+ ecore_hw_start_fastpath(p_hwfn);
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "Started V-PORT %d with MTU %d\n",
+ p_params->vport_id, p_params->mtu);
+ }
+
+ ecore_reset_vport_stats(edev);
+
+ return 0;
+}
+
+static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
+{
+ int rc, i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_vport_stop(p_hwfn,
+ p_hwfn->hw_info.opaque_fid, vport_id);
+
+ if (rc) {
+ DP_ERR(edev, "Failed to stop VPORT\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
+{
+ struct ecore_sp_vport_update_params sp_params;
+ struct ecore_rss_params sp_rss_params;
+ int rc, i;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+ /* Translate protocol params into sp params */
+ sp_params.vport_id = params->vport_id;
+ sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
+ sp_params.vport_active_rx_flg = params->vport_active_flg;
+ sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.update_inner_vlan_removal_flg =
+ params->update_inner_vlan_removal_flg;
+ sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
+ sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+ sp_params.tx_switching_flg = params->tx_switching_flg;
+ sp_params.accept_any_vlan = params->accept_any_vlan;
+ sp_params.update_accept_any_vlan_flg =
+ params->update_accept_any_vlan_flg;
+
+ /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
+ * We need to re-fix the rss values per engine for CMT.
+ */
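+	/* Example: with two hwfns and a largest indirection-table entry of
+	 * 7, the divisor below is (7 + 1) / 2 = 4, so every entry is
+	 * reduced modulo 4 and each engine indexes only its local queues.
+	 */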
+
+ if (edev->num_hwfns > 1 && params->update_rss_flg) {
+ struct qed_update_vport_rss_params *rss = &params->rss_params;
+ int k, max = 0;
+
+ /* Find largest entry, since it's possible RSS needs to
+ * be disabled [in case only 1 queue per-hwfn]
+ */
+ for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
+ max = (max > rss->rss_ind_table[k]) ?
+ max : rss->rss_ind_table[k];
+
+ /* Either fix RSS values or disable RSS */
+ if (edev->num_hwfns < max + 1) {
+ int divisor = (max + edev->num_hwfns - 1) /
+ edev->num_hwfns;
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "CMT - fixing RSS values (modulo %02x)\n",
+ divisor);
+
+ for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
+ rss->rss_ind_table[k] =
+ rss->rss_ind_table[k] % divisor;
+ } else {
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ params->update_rss_flg = 0;
+ }
+ }
+
+ /* Now, update the RSS configuration for actual configuration */
+ if (params->update_rss_flg) {
+ sp_rss_params.update_rss_config = 1;
+ sp_rss_params.rss_enable = 1;
+ sp_rss_params.update_rss_capabilities = 1;
+ sp_rss_params.update_rss_ind_table = 1;
+ sp_rss_params.update_rss_key = 1;
+ sp_rss_params.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
+ ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
+ sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
+ rte_memcpy(sp_rss_params.rss_ind_table,
+ params->rss_params.rss_ind_table,
+ ECORE_RSS_IND_TABLE_SIZE * sizeof(uint16_t));
+ rte_memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+ ECORE_RSS_KEY_SIZE * sizeof(uint32_t));
+ }
+ sp_params.rss_params = &sp_rss_params;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &sp_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "Failed to update VPORT\n");
+ return rc;
+ }
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "Updated V-PORT %d: active_flag %d [update %d]\n",
+ params->vport_id, params->vport_active_flg,
+ params->update_vport_active_flg);
+ }
+
+ return 0;
+}
+
+static int
+qed_start_rxq(struct ecore_dev *edev,
+ uint8_t rss_id, uint8_t rx_queue_id,
+ uint8_t vport_id, uint16_t sb,
+ uint8_t sb_index, uint16_t bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
+{
+ struct ecore_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = rss_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
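+	/* CMT: queues are spread round-robin across the engines; rss_id
+	 * selects the hwfn and rx_queue_id / num_hwfns is the queue index
+	 * relative to that engine.
+	 */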
+
+ rc = ecore_sp_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ rx_queue_id / edev->num_hwfns,
+ vport_id,
+ vport_id,
+ sb,
+ sb_index,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size, pp_prod);
+
+ if (rc) {
+ DP_ERR(edev, "Failed to start RXQ#%d\n", rx_queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ rx_queue_id, rss_id, vport_id, sb);
+
+ return 0;
+}
+
+static int
+qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
+{
+ int rc, hwfn_index;
+ struct ecore_hwfn *p_hwfn;
+
+ hwfn_index = params->rss_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+
+ rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
+ params->rx_queue_id / edev->num_hwfns,
+ params->eq_completion_only, false);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qed_start_txq(struct ecore_dev *edev,
+ uint8_t rss_id, uint16_t tx_queue_id,
+ uint8_t vport_id, uint16_t sb,
+ uint8_t sb_index,
+ dma_addr_t pbl_addr,
+ uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
+{
+ struct ecore_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = rss_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+
+ rc = ecore_sp_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ tx_queue_id / edev->num_hwfns,
+ vport_id,
+ vport_id,
+ sb,
+ sb_index,
+ pbl_addr, pbl_size, pp_doorbell);
+
+ if (rc) {
+ DP_ERR(edev, "Failed to start TXQ#%d\n", tx_queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(edev, ECORE_MSG_SPQ,
+ "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+ tx_queue_id, rss_id, vport_id, sb);
+
+ return 0;
+}
+
+static int
+qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
+{
+ struct ecore_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = params->rss_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+
+ rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
+ params->tx_queue_id / edev->num_hwfns);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qed_fp_cqe_completion(struct ecore_dev *edev,
+ uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
+{
+ return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
+ cqe);
+}
+
+static int qed_fastpath_stop(struct ecore_dev *edev)
+{
+ ecore_hw_stop_fastpath(edev);
+
+ return 0;
+}
+
+static void
+qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
+{
+ ecore_get_vport_stats(edev, stats);
+}
+
+static int
+qed_configure_filter_ucast(struct ecore_dev *edev,
+ struct qed_filter_ucast_params *params)
+{
+ struct ecore_filter_ucast ucast;
+
+ if (!params->vlan_valid && !params->mac_valid) {
+ DP_NOTICE(edev, true,
+			  "Tried configuring a unicast filter, "
+			  "but neither MAC nor VLAN is set\n");
+ return -EINVAL;
+ }
+
+ memset(&ucast, 0, sizeof(ucast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ ucast.opcode = ECORE_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ ucast.opcode = ECORE_FILTER_REMOVE;
+ break;
+ case QED_FILTER_XCAST_TYPE_REPLACE:
+ ucast.opcode = ECORE_FILTER_REPLACE;
+ break;
+	default:
+		DP_NOTICE(edev, true, "Unknown unicast filter type %d\n",
+			  params->type);
+		return -EINVAL;
+ }
+
+ if (params->vlan_valid && params->mac_valid) {
+ ucast.type = ECORE_FILTER_MAC_VLAN;
+ ether_addr_copy((struct ether_addr *)&params->mac,
+ (struct ether_addr *)&ucast.mac);
+ ucast.vlan = params->vlan;
+ } else if (params->mac_valid) {
+ ucast.type = ECORE_FILTER_MAC;
+ ether_addr_copy((struct ether_addr *)&params->mac,
+ (struct ether_addr *)&ucast.mac);
+ } else {
+ ucast.type = ECORE_FILTER_VLAN;
+ ucast.vlan = params->vlan;
+ }
+
+ ucast.is_rx_filter = true;
+ ucast.is_tx_filter = true;
+
+ return ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+}
+
+static int
+qed_configure_filter_mcast(struct ecore_dev *edev,
+ struct qed_filter_mcast_params *params)
+{
+ struct ecore_filter_mcast mcast;
+ int i;
+
+ memset(&mcast, 0, sizeof(mcast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ mcast.opcode = ECORE_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ mcast.opcode = ECORE_FILTER_REMOVE;
+ break;
+	default:
+		DP_NOTICE(edev, true, "Unknown multicast filter type %d\n",
+			  params->type);
+		return -EINVAL;
+ }
+
+ mcast.num_mc_addrs = params->num;
+ for (i = 0; i < mcast.num_mc_addrs; i++)
+ ether_addr_copy((struct ether_addr *)&params->mac[i],
+ (struct ether_addr *)&mcast.mac[i]);
+
+ return ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+}
+
+int qed_configure_filter_rx_mode(struct ecore_dev *edev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct ecore_filter_accept_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
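+	/* Baseline filter: accept matched unicast/multicast plus broadcast;
+	 * the promiscuous modes below add the unmatched categories on top.
+	 */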
+ flags.update_rx_mode_config = 1;
+ flags.update_tx_mode_config = 1;
+ flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ if (IS_VF(edev)) {
+ flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+ }
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+ QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED;
+ }
+
+ return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+ ECORE_SPQ_MODE_CB, NULL);
+}
+
+static int
+qed_configure_filter(struct ecore_dev *edev, struct qed_filter_params *params)
+{
+ switch (params->type) {
+ case QED_FILTER_TYPE_UCAST:
+ return qed_configure_filter_ucast(edev, &params->filter.ucast);
+ case QED_FILTER_TYPE_MCAST:
+ return qed_configure_filter_mcast(edev, &params->filter.mcast);
+ case QED_FILTER_TYPE_RX_MODE:
+ return qed_configure_filter_rx_mode(edev,
+ params->filter.
+ accept_flags);
+ default:
+ DP_NOTICE(edev, true, "Unknown filter type %d\n",
+ (int)params->type);
+ return -EINVAL;
+ }
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+ INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+ INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+ INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
+ INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
+ INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
+ INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
+ INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
+ INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
+ INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
+ INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
+ INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
+ INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
+ INIT_STRUCT_FIELD(filter_config, &qed_configure_filter),
+};
+
+uint32_t qed_get_protocol_version(enum qed_protocol protocol)
+{
+ switch (protocol) {
+ case QED_PROTOCOL_ETH:
+ return QED_ETH_INTERFACE_VERSION;
+ default:
+ return 0;
+ }
+}
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+ return &qed_eth_ops_pass;
+}
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
new file mode 100644
index 00000000..26968eb0
--- /dev/null
+++ b/drivers/net/qede/qede_eth_if.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_ETH_IF_H
+#define _QEDE_ETH_IF_H
+
+#include "qede_if.h"
+
+/* forward decl */
+struct eth_slow_path_rx_cqe;
+
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+#define QED_ETH_INTERFACE_VERSION 609
+
+#define QEDE_MAX_MCAST_FILTERS 64
+
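+/* These values double as OR-able flags: REGULAR is 0, and combining
+ * MULTI_PROMISC (1) with PROMISC (2) yields the value 3 that
+ * qed_configure_filter_rx_mode() tests for.
+ */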
+enum qed_filter_rx_mode_type {
+ QED_FILTER_RX_MODE_TYPE_REGULAR,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+ QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_xcast_params_type {
+ QED_FILTER_XCAST_TYPE_ADD,
+ QED_FILTER_XCAST_TYPE_DEL,
+ QED_FILTER_XCAST_TYPE_REPLACE,
+};
+
+enum qed_filter_type {
+ QED_FILTER_TYPE_UCAST,
+ QED_FILTER_TYPE_MCAST,
+ QED_FILTER_TYPE_RX_MODE,
+ QED_MAX_FILTER_TYPES,
+};
+
+struct qed_dev_eth_info {
+ struct qed_dev_info common;
+
+ uint8_t num_queues;
+ uint8_t num_tc;
+
+ struct ether_addr port_mac;
+ uint8_t num_vlan_filters;
+ uint32_t num_mac_addrs;
+};
+
+struct qed_update_vport_rss_params {
+ uint16_t rss_ind_table[128];
+ uint32_t rss_key[10];
+ u8 rss_caps;
+};
+
+struct qed_stop_rxq_params {
+ uint8_t rss_id;
+ uint8_t rx_queue_id;
+ uint8_t vport_id;
+ bool eq_completion_only;
+};
+
+struct qed_update_vport_params {
+ uint8_t vport_id;
+ uint8_t update_vport_active_flg;
+ uint8_t vport_active_flg;
+ uint8_t update_inner_vlan_removal_flg;
+ uint8_t inner_vlan_removal_flg;
+ uint8_t update_tx_switching_flg;
+ uint8_t tx_switching_flg;
+ uint8_t update_accept_any_vlan_flg;
+ uint8_t accept_any_vlan;
+ uint8_t update_rss_flg;
+ struct qed_update_vport_rss_params rss_params;
+};
+
+struct qed_start_vport_params {
+ bool remove_inner_vlan;
+ bool handle_ptp_pkts;
+ bool gro_enable;
+ bool drop_ttl0;
+ uint8_t vport_id;
+ uint16_t mtu;
+ bool clear_stats;
+};
+
+struct qed_stop_txq_params {
+ uint8_t rss_id;
+ uint8_t tx_queue_id;
+};
+
+struct qed_filter_ucast_params {
+ enum qed_filter_xcast_params_type type;
+ uint8_t vlan_valid;
+ uint16_t vlan;
+ uint8_t mac_valid;
+ unsigned char mac[ETHER_ADDR_LEN];
+};
+
+struct qed_filter_mcast_params {
+ enum qed_filter_xcast_params_type type;
+ uint8_t num;
+ unsigned char mac[QEDE_MAX_MCAST_FILTERS][ETHER_ADDR_LEN];
+};
+
+union qed_filter_type_params {
+ enum qed_filter_rx_mode_type accept_flags;
+ struct qed_filter_ucast_params ucast;
+ struct qed_filter_mcast_params mcast;
+};
+
+struct qed_filter_params {
+ enum qed_filter_type type;
+ union qed_filter_type_params filter;
+};
+
+struct qed_eth_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
+
+ int (*vport_start)(struct ecore_dev *edev,
+ struct qed_start_vport_params *params);
+
+ int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
+
+ int (*vport_update)(struct ecore_dev *edev,
+ struct qed_update_vport_params *params);
+
+ int (*q_rx_start)(struct ecore_dev *cdev,
+ uint8_t rss_id, uint8_t rx_queue_id,
+ uint8_t vport_id, uint16_t sb,
+ uint8_t sb_index, uint16_t bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
+
+ int (*q_rx_stop)(struct ecore_dev *edev,
+ struct qed_stop_rxq_params *params);
+
+ int (*q_tx_start)(struct ecore_dev *edev,
+ uint8_t rss_id, uint16_t tx_queue_id,
+ uint8_t vport_id, uint16_t sb,
+ uint8_t sb_index,
+ dma_addr_t pbl_addr,
+ uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
+
+ int (*q_tx_stop)(struct ecore_dev *edev,
+ struct qed_stop_txq_params *params);
+
+ int (*eth_cqe_completion)(struct ecore_dev *edev,
+ uint8_t rss_id,
+ struct eth_slow_path_rx_cqe *cqe);
+
+ int (*fastpath_stop)(struct ecore_dev *edev);
+
+ void (*get_vport_stats)(struct ecore_dev *edev,
+ struct ecore_eth_stats *stats);
+
+ int (*filter_config)(struct ecore_dev *edev,
+ struct qed_filter_params *params);
+};
+
+/* externs */
+
+extern const struct qed_common_ops qed_common_ops_pass;
+
+const struct qed_eth_ops *qed_get_eth_ops(void);
+
+int qed_configure_filter_rx_mode(struct ecore_dev *edev,
+ enum qed_filter_rx_mode_type type);
+
+#endif /* _QEDE_ETH_IF_H */
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
new file mode 100644
index 00000000..bb531be5
--- /dev/null
+++ b/drivers/net/qede/qede_ethdev.c
@@ -0,0 +1,1344 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_ethdev.h"
+#include <rte_alarm.h>
+
+/* Globals */
+static const struct qed_eth_ops *qed_ops;
+static const char *drivername = "qede pmd";
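+/* Slowpath polling interval for CMT devices, in seconds */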
+static int64_t timer_period = 1;
+
+static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
+{
+ ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+}
+
+static void
+qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+ if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_enable failed\n");
+}
+
+static void
+qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
+{
+ rte_memcpy(&qdev->dev_info, info, sizeof(*info));
+ qdev->num_tc = qdev->dev_info.num_tc;
+ qdev->ops = qed_ops;
+}
+
+static void qede_print_adapter_info(struct qede_dev *qdev)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_dev_info *info = &qdev->dev_info.common;
+ static char ver_str[QED_DRV_VER_STR_SIZE];
+
+ DP_INFO(edev, "*********************************\n");
+ DP_INFO(edev, " Chip details : %s%d\n",
+ ECORE_IS_BB(edev) ? "BB" : "AH",
+ CHIP_REV_IS_A0(edev) ? 0 : 1);
+
+ sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
+ edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
+ QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
+ strcpy(qdev->drv_ver, ver_str);
+ DP_INFO(edev, " Driver version : %s\n", ver_str);
+
+ sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
+ info->fw_rev, info->fw_eng);
+ DP_INFO(edev, " Firmware version : %s\n", ver_str);
+
+ sprintf(ver_str, "%d.%d.%d.%d",
+ (info->mfw_rev >> 24) & 0xff,
+ (info->mfw_rev >> 16) & 0xff,
+ (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
+ DP_INFO(edev, " Management firmware version : %s\n", ver_str);
+
+ DP_INFO(edev, " Firmware file : %s\n", fw_file);
+
+ DP_INFO(edev, "*********************************\n");
+}
+
+static int
+qede_set_ucast_rx_mac(struct qede_dev *qdev,
+ enum qed_filter_xcast_params_type opcode,
+ uint8_t mac[ETHER_ADDR_LEN])
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.mac_valid = 1;
+ rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
+ return qdev->ops->filter_config(edev, &filter_cmd);
+}
+
+static void
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+ uint32_t index, __rte_unused uint32_t pool)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (index >= qdev->dev_info.num_mac_addrs) {
+ DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
+ index, qdev->dev_info.num_mac_addrs);
+ return;
+ }
+
+	/* Add the MAC address even though promiscuous mode is set */
+	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+		DP_INFO(edev, "Port is in promisc mode, adding MAC anyway\n");
+
+ /* Add MAC filters according to the unicast secondary macs */
+ rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
+ mac_addr->addr_bytes);
+ if (rc)
+ DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
+}
+
+static void
+qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ether_addr mac_addr;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (index >= qdev->dev_info.num_mac_addrs) {
+ DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
+ index, qdev->dev_info.num_mac_addrs);
+ return;
+ }
+
+ /* Use the index maintained by rte */
+ ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
+ rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
+ mac_addr.addr_bytes);
+ if (rc)
+ DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
+}
+
+static void
+qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rc;
+
+ if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
+ mac_addr->addr_bytes)) {
+ DP_ERR(edev, "Setting MAC address is not allowed\n");
+ ether_addr_copy(&qdev->primary_mac,
+ &eth_dev->data->mac_addrs[0]);
+ return;
+ }
+
+ /* First remove the primary mac */
+ rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
+ qdev->primary_mac.addr_bytes);
+
+ if (rc) {
+		DP_ERR(edev, "Unable to remove current macaddr,"
+		       " reverting to previous default mac\n");
+ ether_addr_copy(&qdev->primary_mac,
+ &eth_dev->data->mac_addrs[0]);
+ return;
+ }
+
+ /* Add new MAC */
+ rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
+ mac_addr->addr_bytes);
+
+ if (rc)
+ DP_ERR(edev, "Unable to add new default mac\n");
+ else
+ ether_addr_copy(mac_addr, &qdev->primary_mac);
+}
+
+
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_update_vport_params params = {
+ .vport_id = 0,
+ .accept_any_vlan = action,
+ .update_accept_any_vlan_flg = 1,
+ };
+ int rc;
+
+ /* Proceed only if action actually needs to be performed */
+ if (qdev->accept_any_vlan == action)
+ return;
+
+ rc = qdev->ops->vport_update(edev, &params);
+ if (rc) {
+ DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+ action ? "enable" : "disable");
+ } else {
+ DP_INFO(edev, "%s accept-any-vlan\n",
+ action ? "enabled" : "disabled");
+ qdev->accept_any_vlan = action;
+ }
+}
+
+void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ /* TODO: - QED_FILTER_TYPE_UCAST */
+ enum qed_filter_rx_mode_type accept_flags =
+ QED_FILTER_RX_MODE_TYPE_REGULAR;
+ struct qed_filter_params rx_mode;
+ int rc;
+
+ /* Configure the struct for the Rx mode */
+ memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+ rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+ rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
+ eth_dev->data->mac_addrs[0].addr_bytes);
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ } else {
+ rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
+ eth_dev->data->
+ mac_addrs[0].addr_bytes);
+ if (rc) {
+ DP_ERR(edev, "Unable to add filter\n");
+ return;
+ }
+ }
+
+ /* take care of VLAN mode */
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
+ qede_config_accept_any_vlan(qdev, true);
+ } else if (!qdev->non_configured_vlans) {
+		/* If we don't have any non-configured VLANs and promisc
+		 * is not set, then check if we need to disable
+		 * accept_any_vlan mode.
+		 * Because in this case, accept_any_vlan mode is set
+		 * as part of IFF_PROMISC flag handling.
+		 */
+ qede_config_accept_any_vlan(qdev, false);
+ }
+ rx_mode.filter.accept_flags = accept_flags;
+ rc = qdev->ops->filter_config(edev, &rx_mode);
+ if (rc)
+ DP_ERR(edev, "Filter config failed rc=%d\n", rc);
+}
+
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rc;
+
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_inner_vlan_removal_flg = 1;
+ vport_update_params.inner_vlan_removal_flg = set_stripping;
+ rc = qdev->ops->vport_update(edev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ (void)qede_vlan_stripping(eth_dev, 1);
+ else
+ (void)qede_vlan_stripping(eth_dev, 0);
+ }
+
+ DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
+ mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
+}
+
+static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
+ enum qed_filter_xcast_params_type opcode,
+ uint16_t vid)
+{
+ struct qed_filter_params filter_cmd;
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.vlan_valid = 1;
+ filter_cmd.filter.ucast.vlan = vid;
+
+ return qdev->ops->filter_config(edev, &filter_cmd);
+}
+
+static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, int on)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_dev_eth_info *dev_info = &qdev->dev_info;
+ int rc;
+
+ if (vlan_id != 0 &&
+ qdev->configured_vlans == dev_info->num_vlan_filters) {
+ DP_NOTICE(edev, false, "Reached max VLAN filter limit"
+ " enabling accept_any_vlan\n");
+ qede_config_accept_any_vlan(qdev, true);
+ return 0;
+ }
+
+ if (on) {
+ rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
+ vlan_id);
+ if (rc)
+ DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
+ rc);
+ else
+ if (vlan_id != 0)
+ qdev->configured_vlans++;
+ } else {
+ rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
+ vlan_id);
+ if (rc)
+ DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
+ vlan_id, rc);
+ else
+ if (vlan_id != 0)
+ qdev->configured_vlans--;
+ }
+
+ DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
+ vlan_id, on, rc, qdev->configured_vlans);
+
+ return rc;
+}
+
+static int qede_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
+ DP_NOTICE(edev, false,
+ "Unequal number of rx/tx queues "
+ "is not supported RX=%u TX=%u\n",
+ eth_dev->data->nb_rx_queues,
+ eth_dev->data->nb_tx_queues);
+ return -EINVAL;
+ }
+
+ /* Check requirements for 100G mode */
+ if (edev->num_hwfns > 1) {
+ if (eth_dev->data->nb_rx_queues < 2) {
+ DP_NOTICE(edev, false,
+ "100G mode requires minimum two queues\n");
+ return -EINVAL;
+ }
+
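+		/* Queues are spread across the two engines
+		 * (rss_id % num_hwfns), so an odd count would leave
+		 * them unbalanced.
+		 */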
+ if ((eth_dev->data->nb_rx_queues % 2) != 0) {
+ DP_NOTICE(edev, false,
+ "100G mode requires even number of queues\n");
+ return -EINVAL;
+ }
+ }
+
+ qdev->num_rss = eth_dev->data->nb_rx_queues;
+
+ /* Initial state */
+ qdev->state = QEDE_CLOSE;
+
+ /* Sanity checks and throw warnings */
+
+ if (rxmode->enable_scatter == 1) {
+ DP_ERR(edev, "RX scatter packets is not supported\n");
+ return -EINVAL;
+ }
+
+ if (rxmode->enable_lro == 1) {
+		DP_ERR(edev, "LRO is not supported\n");
+ return -EINVAL;
+ }
+
+ if (!rxmode->hw_strip_crc)
+ DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
+
+ if (!rxmode->hw_ip_checksum)
+ DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
+ "in hw\n");
+
+ DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+ QEDE_RSS_CNT(qdev), qdev->num_tc);
+
+ DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
+ " port %u first_on_engine %d\n",
+ edev->hwfns[0].my_id,
+ edev->hwfns[0].rel_pf_id,
+ edev->hwfns[0].abs_pf_id,
+ edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
+
+ return 0;
+}
+
+/* Info about HW descriptor ring limitations */
+static const struct rte_eth_desc_lim qede_rx_desc_lim = {
+ .nb_max = NUM_RX_BDS_MAX,
+ .nb_min = 128,
+ .nb_align = 128 /* lowest common multiple */
+};
+
+static const struct rte_eth_desc_lim qede_tx_desc_lim = {
+ .nb_max = NUM_TX_BDS_MAX,
+ .nb_min = 256,
+ .nb_align = 256
+};
+
+static void
+qede_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
+ QEDE_ETH_OVERHEAD);
+ dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
+ dev_info->rx_desc_lim = qede_rx_desc_lim;
+ dev_info->tx_desc_lim = qede_tx_desc_lim;
+ dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
+ dev_info->max_tx_queues = dev_info->max_rx_queues;
+ dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
+ if (IS_VF(edev))
+ dev_info->max_vfs = 0;
+ else
+ dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
+ dev_info->driver_name = qdev->drv_ver;
+ dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
+ dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .txq_flags = QEDE_TXQ_FLAGS,
+ };
+
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM);
+ dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+
+ dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ uint16_t link_duplex;
+ struct qed_link_output link;
+ struct rte_eth_link *curr = &eth_dev->data->dev_link;
+
+ memset(&link, 0, sizeof(struct qed_link_output));
+ qdev->ops->common->get_link(edev, &link);
+
+ /* Link Speed */
+ curr->link_speed = link.speed;
+
+ /* Link Mode */
+ switch (link.duplex) {
+ case QEDE_DUPLEX_HALF:
+ link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case QEDE_DUPLEX_FULL:
+ link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case QEDE_DUPLEX_UNKNOWN:
+ default:
+ link_duplex = -1;
+ }
+ curr->link_duplex = link_duplex;
+
+ /* Link Status */
+ curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+
+ /* AN */
+ curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+ ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+
+ DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
+ curr->link_speed, curr->link_duplex,
+ curr->link_autoneg, curr->link_status);
+
+ /* return 0 means link status changed, -1 means not changed */
+ return ((curr->link_status == link.link_up) ? -1 : 0);
+}
+
+static void
+qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
+ enum qed_filter_rx_mode_type accept_flags)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_filter_params rx_mode;
+
+ DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
+
+ memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+ rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+ rx_mode.filter.accept_flags = accept_flags;
+ qdev->ops->filter_config(edev, &rx_mode);
+}
+
+static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+ if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+ type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+ qede_rx_mode_setting(eth_dev, type);
+}
+
+static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
+ qede_rx_mode_setting(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
+ else
+ qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static void qede_poll_sp_sb_cb(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rc;
+
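+	/* Service the slowpath status block of each engine, then re-arm
+	 * the alarm for the next poll.
+	 */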
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+ qede_interrupt_action(&edev->hwfns[1]);
+
+ rc = rte_eal_alarm_set(timer_period * US_PER_S,
+ qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ if (rc != 0) {
+ DP_ERR(edev, "Unable to start periodic"
+ " timer rc %d\n", rc);
+ assert(false && "Unable to start periodic timer");
+ }
+}
+
+static void qede_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+	/* dev_stop() shall cleanup fp resources in hw but without releasing
+	 * dma memories and sw structures so that dev_start() can be called
+	 * by the app without reconfiguration. However, in dev_close() we
+	 * can release all the resources and the device can be brought up
+	 * afresh.
+	 */
+ if (qdev->state != QEDE_STOP)
+ qede_dev_stop(eth_dev);
+ else
+ DP_INFO(edev, "Device is already stopped\n");
+
+ qede_free_mem_load(qdev);
+
+ qede_free_fp_arrays(qdev);
+
+ qede_dev_set_link_state(eth_dev, false);
+
+ qdev->ops->common->slowpath_stop(edev);
+
+ qdev->ops->common->remove(edev);
+
+ rte_intr_disable(&eth_dev->pci_dev->intr_handle);
+
+ rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
+ qede_interrupt_handler, (void *)eth_dev);
+
+ if (edev->num_hwfns > 1)
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+
+ qdev->state = QEDE_CLOSE;
+}
+
+static void
+qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct ecore_eth_stats stats;
+
+ qdev->ops->get_vport_stats(edev, &stats);
+
+ /* RX Stats */
+ eth_stats->ipackets = stats.rx_ucast_pkts +
+ stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+
+ eth_stats->ibytes = stats.rx_ucast_bytes +
+ stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+
+ eth_stats->ierrors = stats.rx_crc_errors +
+ stats.rx_align_errors +
+ stats.rx_carrier_errors +
+ stats.rx_oversize_packets +
+ stats.rx_jabbers + stats.rx_undersize_packets;
+
+ eth_stats->rx_nombuf = stats.no_buff_discards;
+
+ eth_stats->imissed = stats.mftag_filter_discards +
+ stats.mac_filter_discards +
+ stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+
+ /* TX stats */
+ eth_stats->opackets = stats.tx_ucast_pkts +
+ stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+
+ eth_stats->obytes = stats.tx_ucast_bytes +
+ stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+
+ eth_stats->oerrors = stats.tx_err_drop_pkts;
+
+ DP_INFO(edev,
+ "no_buff_discards=%" PRIu64 ""
+ " mac_filter_discards=%" PRIu64 ""
+ " brb_truncates=%" PRIu64 ""
+ " brb_discards=%" PRIu64 "\n",
+ stats.no_buff_discards,
+ stats.mac_filter_discards,
+ stats.brb_truncates, stats.brb_discards);
+}
+
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_params link_params;
+ int rc;
+
+ DP_INFO(edev, "setting link state %d\n", link_up);
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = link_up;
+ rc = qdev->ops->common->set_link(edev, &link_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(edev, "Unable to set link state %d\n", link_up);
+
+ return rc;
+}
+
+static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_set_link_state(eth_dev, true);
+}
+
+static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_set_link_state(eth_dev, false);
+}
+
+static void qede_reset_stats(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ ecore_reset_vport_stats(edev);
+}
+
+static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ enum qed_filter_rx_mode_type type =
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+ type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
+
+ qede_rx_mode_setting(eth_dev, type);
+}
+
+static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
+ qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
+ else
+ qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+}
+
+static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+
+ memset(&current_link, 0, sizeof(current_link));
+ qdev->ops->common->get_link(edev, &current_link);
+
+ memset(&params, 0, sizeof(params));
+ params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+ if (fc_conf->autoneg) {
+ if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
+ DP_ERR(edev, "Autoneg not supported\n");
+ return -EINVAL;
+ }
+ params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ }
+
+ /* Pause is assumed to be supported (SUPPORTED_Pause) */
+ if (fc_conf->mode == RTE_FC_FULL)
+ params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
+ QED_LINK_PAUSE_RX_ENABLE);
+ if (fc_conf->mode == RTE_FC_TX_PAUSE)
+ params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+ if (fc_conf->mode == RTE_FC_RX_PAUSE)
+ params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+ params.link_up = true;
+ (void)qdev->ops->common->set_link(edev, &params);
+
+ return 0;
+}
+
+static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ qdev->ops->common->get_link(edev, &current_link);
+
+ if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ fc_conf->autoneg = true;
+
+ if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
+ QED_LINK_PAUSE_TX_ENABLE))
+ fc_conf->mode = RTE_FC_FULL;
+ else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static const uint32_t *
+qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+ return ptypes;
+
+ return NULL;
+}
+
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ uint8_t rss_caps;
+ uint32_t *key = (uint32_t *)rss_conf->rss_key;
+ uint64_t hf = rss_conf->rss_hf;
+ int i;
+
+ if (hf == 0)
+ DP_ERR(edev, "hash function 0 will disable RSS\n");
+
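+	/* Several ETH_RSS_* flags collapse onto one ecore capability, e.g.
+	 * ETH_RSS_IPV6 and ETH_RSS_IPV6_EX both map to ECORE_RSS_IPV6.
+	 */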
+ rss_caps = 0;
+ rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+ rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+ rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+ rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+ rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+ rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+
+ /* If the mapping doesn't fit any supported, return */
+ if (rss_caps == 0 && hf != 0)
+ return -EINVAL;
+
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+
+ if (key != NULL)
+ memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+
+ qdev->rss_params.rss_caps = rss_caps;
+ memcpy(&vport_update_params.rss_params, &qdev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+
+ return qdev->ops->vport_update(edev, &vport_update_params);
+}
+
+int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ uint64_t hf;
+
+ if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
+ return -EINVAL;
+
+ if (rss_conf->rss_key)
+ memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
+ sizeof(qdev->rss_params.rss_key));
+
+ hf = 0;
+ hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
+ ETH_RSS_IPV4 : 0;
+ hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
+ ETH_RSS_IPV6 : 0;
+ hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
+ ETH_RSS_IPV6_EX : 0;
+ hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
+ ETH_RSS_NONFRAG_IPV4_TCP : 0;
+ hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
+ ETH_RSS_NONFRAG_IPV6_TCP : 0;
+ hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
+ ETH_RSS_IPV6_TCP_EX : 0;
+
+ rss_conf->rss_hf = hf;
+
+ return 0;
+}
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ uint16_t i, idx, shift;
+
+ if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ DP_ERR(edev, "reta_size %d is not supported by hardware\n",
+ reta_size);
+ return -EINVAL;
+ }
+
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ memcpy(&vport_update_params.rss_params, &qdev->rss_params,
+ sizeof(vport_update_params.rss_params));
+
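+	/* Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64)
+	 * entries: idx selects the group, shift the bit within its mask.
+	 */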
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift)) {
+ uint8_t entry = reta_conf[idx].reta[shift];
+ qdev->rss_params.rss_ind_table[i] = entry;
+ }
+ }
+
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+
+ return qdev->ops->vport_update(edev, &vport_update_params);
+}
+
+int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ uint16_t i, idx, shift;
+
+ if (reta_size > ETH_RSS_RETA_SIZE_128) {
+ struct ecore_dev *edev = &qdev->edev;
+		DP_ERR(edev, "reta_size %d is not supported\n",
+		       reta_size);
+		return -EINVAL;
+	}
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift)) {
+ uint8_t entry = qdev->rss_params.rss_ind_table[i];
+ reta_conf[idx].reta[shift] = entry;
+ }
+ }
+
+ return 0;
+}
+
+int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ uint32_t frame_size;
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct rte_eth_dev_info dev_info = {0};
+
+ qede_dev_info_get(dev, &dev_info);
+
+ /* VLAN_TAG = 4 */
+ frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
+
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ return -EINVAL;
+
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ qdev->mtu = mtu;
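+	/* Restart the port so the new frame size takes effect */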
+ qede_dev_stop(dev);
+ qede_dev_start(dev);
+
+ return 0;
+}
+
+static const struct eth_dev_ops qede_eth_dev_ops = {
+ .dev_configure = qede_dev_configure,
+ .dev_infos_get = qede_dev_info_get,
+ .rx_queue_setup = qede_rx_queue_setup,
+ .rx_queue_release = qede_rx_queue_release,
+ .tx_queue_setup = qede_tx_queue_setup,
+ .tx_queue_release = qede_tx_queue_release,
+ .dev_start = qede_dev_start,
+ .dev_set_link_up = qede_dev_set_link_up,
+ .dev_set_link_down = qede_dev_set_link_down,
+ .link_update = qede_link_update,
+ .promiscuous_enable = qede_promiscuous_enable,
+ .promiscuous_disable = qede_promiscuous_disable,
+ .allmulticast_enable = qede_allmulticast_enable,
+ .allmulticast_disable = qede_allmulticast_disable,
+ .dev_stop = qede_dev_stop,
+ .dev_close = qede_dev_close,
+ .stats_get = qede_get_stats,
+ .stats_reset = qede_reset_stats,
+ .mac_addr_add = qede_mac_addr_add,
+ .mac_addr_remove = qede_mac_addr_remove,
+ .mac_addr_set = qede_mac_addr_set,
+ .vlan_offload_set = qede_vlan_offload_set,
+ .vlan_filter_set = qede_vlan_filter_set,
+ .flow_ctrl_set = qede_flow_ctrl_set,
+ .flow_ctrl_get = qede_flow_ctrl_get,
+ .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+ .rss_hash_update = qede_rss_hash_update,
+ .rss_hash_conf_get = qede_rss_hash_conf_get,
+ .reta_update = qede_rss_reta_update,
+ .reta_query = qede_rss_reta_query,
+ .mtu_set = qede_set_mtu,
+};
+
+static const struct eth_dev_ops qede_eth_vf_dev_ops = {
+ .dev_configure = qede_dev_configure,
+ .dev_infos_get = qede_dev_info_get,
+ .rx_queue_setup = qede_rx_queue_setup,
+ .rx_queue_release = qede_rx_queue_release,
+ .tx_queue_setup = qede_tx_queue_setup,
+ .tx_queue_release = qede_tx_queue_release,
+ .dev_start = qede_dev_start,
+ .dev_set_link_up = qede_dev_set_link_up,
+ .dev_set_link_down = qede_dev_set_link_down,
+ .link_update = qede_link_update,
+ .promiscuous_enable = qede_promiscuous_enable,
+ .promiscuous_disable = qede_promiscuous_disable,
+ .allmulticast_enable = qede_allmulticast_enable,
+ .allmulticast_disable = qede_allmulticast_disable,
+ .dev_stop = qede_dev_stop,
+ .dev_close = qede_dev_close,
+ .stats_get = qede_get_stats,
+ .stats_reset = qede_reset_stats,
+ .vlan_offload_set = qede_vlan_offload_set,
+ .vlan_filter_set = qede_vlan_filter_set,
+ .dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
+ .rss_hash_update = qede_rss_hash_update,
+ .rss_hash_conf_get = qede_rss_hash_conf_get,
+ .reta_update = qede_rss_reta_update,
+ .reta_query = qede_rss_reta_query,
+ .mtu_set = qede_set_mtu,
+};
+
+static void qede_update_pf_params(struct ecore_dev *edev)
+{
+ struct ecore_pf_params pf_params;
+ /* 32 rx + 32 tx */
+ memset(&pf_params, 0, sizeof(struct ecore_pf_params));
+ pf_params.eth_pf_params.num_cons = 64;
+ qed_ops->common->update_pf_params(edev, &pf_params);
+}
+
+static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
+{
+ struct rte_pci_device *pci_dev;
+ struct rte_pci_addr pci_addr;
+ struct qede_dev *adapter;
+ struct ecore_dev *edev;
+ struct qed_dev_eth_info dev_info;
+ struct qed_slowpath_params params;
+ uint32_t qed_ver;
+ static bool do_once = true;
+ uint8_t bulletin_change;
+ uint8_t vf_mac[ETHER_ADDR_LEN];
+ uint8_t is_mac_forced;
+ bool is_mac_exist;
+ /* Fix up ecore debug level */
+ uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
+ uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+ uint32_t max_mac_addrs;
+ int rc;
+
+ /* Extract key data structures */
+ adapter = eth_dev->data->dev_private;
+ edev = &adapter->edev;
+ pci_addr = eth_dev->pci_dev->addr;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
+ pci_addr.bus, pci_addr.devid, pci_addr.function,
+ eth_dev->data->port_id);
+
+ eth_dev->rx_pkt_burst = qede_recv_pkts;
+ eth_dev->tx_pkt_burst = qede_xmit_pkts;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DP_NOTICE(edev, false,
+ "Skipping device init from secondary process\n");
+ return 0;
+ }
+
+ pci_dev = eth_dev->pci_dev;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
+
+ qed_ops = qed_get_eth_ops();
+ if (!qed_ops) {
+ DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
+ return -EINVAL;
+ }
+
+ DP_INFO(edev, "Starting qede probe\n");
+
+ rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
+ dp_module, dp_level, is_vf);
+
+ if (rc != 0) {
+ DP_ERR(edev, "qede probe failed rc %d\n", rc);
+ return -ENODEV;
+ }
+
+ qede_update_pf_params(edev);
+
+ rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
+ qede_interrupt_handler, (void *)eth_dev);
+
+ if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
+ DP_ERR(edev, "rte_intr_enable() failed\n");
+ return -ENODEV;
+ }
+
+ /* Start the Slowpath-process */
+ memset(&params, 0, sizeof(struct qed_slowpath_params));
+ params.int_mode = ECORE_INT_MODE_MSIX;
+ params.drv_major = QEDE_MAJOR_VERSION;
+ params.drv_minor = QEDE_MINOR_VERSION;
+ params.drv_rev = QEDE_REVISION_VERSION;
+ params.drv_eng = QEDE_ENGINEERING_VERSION;
+ strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+
+ /* For CMT mode device do periodic polling for slowpath events.
+ * This is required since uio device uses only one MSI-x
+ * interrupt vector but we need one for each engine.
+ */
+ if (edev->num_hwfns > 1) {
+ rc = rte_eal_alarm_set(timer_period * US_PER_S,
+ qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ if (rc != 0) {
+ DP_ERR(edev, "Unable to start periodic"
+ " timer rc %d\n", rc);
+ return -EINVAL;
+ }
+ }
+
+ rc = qed_ops->common->slowpath_start(edev, &params);
+ if (rc) {
+ DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENODEV;
+ }
+
+ rc = qed_ops->fill_dev_info(edev, &dev_info);
+ if (rc) {
+ DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
+ qed_ops->common->slowpath_stop(edev);
+ qed_ops->common->remove(edev);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENODEV;
+ }
+
+ qede_alloc_etherdev(adapter, &dev_info);
+
+ adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
+
+ if (!is_vf)
+ adapter->dev_info.num_mac_addrs =
+ (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
+ ECORE_MAC);
+ else
+ ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
+ &adapter->dev_info.num_mac_addrs);
+
+ /* Allocate memory for storing MAC addr */
+ eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
+ (ETHER_ADDR_LEN *
+ adapter->dev_info.num_mac_addrs),
+ RTE_CACHE_LINE_SIZE);
+
+ if (eth_dev->data->mac_addrs == NULL) {
+ DP_ERR(edev, "Failed to allocate MAC address\n");
+ qed_ops->common->slowpath_stop(edev);
+ qed_ops->common->remove(edev);
+ rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+ (void *)eth_dev);
+ return -ENOMEM;
+ }
+
+ if (!is_vf) {
+ ether_addr_copy((struct ether_addr *)edev->hwfns[0].
+ hw_info.hw_mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+ ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ } else {
+ ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
+ &bulletin_change);
+ if (bulletin_change) {
+ is_mac_exist =
+ ecore_vf_bulletin_get_forced_mac(
+ ECORE_LEADING_HWFN(edev),
+ vf_mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced) {
+ DP_INFO(edev, "VF macaddr received from PF\n");
+ ether_addr_copy((struct ether_addr *)&vf_mac,
+ &eth_dev->data->mac_addrs[0]);
+ ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ } else {
+ DP_NOTICE(edev, false,
+ "No VF macaddr assigned\n");
+ }
+ }
+ }
+
+ eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
+
+ if (do_once) {
+ qede_print_adapter_info(adapter);
+ do_once = false;
+ }
+
+ DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ adapter->primary_mac.addr_bytes[0],
+ adapter->primary_mac.addr_bytes[1],
+ adapter->primary_mac.addr_bytes[2],
+ adapter->primary_mac.addr_bytes[3],
+ adapter->primary_mac.addr_bytes[4],
+ adapter->primary_mac.addr_bytes[5]);
+
+ return rc;
+}
+
+static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ return qede_common_dev_init(eth_dev, 1);
+}
+
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ return qede_common_dev_init(eth_dev, 0);
+}
+
+static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
+{
+ /* only uninitialize in the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* safe to close dev here */
+ qede_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ if (eth_dev->data->mac_addrs)
+ rte_free(eth_dev->data->mac_addrs);
+
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_common_uninit(eth_dev);
+}
+
+static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ return qede_dev_common_uninit(eth_dev);
+}
+
+static struct rte_pci_id pci_id_qedevf_map[] = {
+#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
+ },
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
+ },
+ {.vendor_id = 0,}
+};
+
+static struct rte_pci_id pci_id_qede_map[] = {
+#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
+ },
+ {.vendor_id = 0,}
+};
+
+static struct eth_driver rte_qedevf_pmd = {
+ .pci_drv = {
+ .name = "rte_qedevf_pmd",
+ .id_table = pci_id_qedevf_map,
+ .drv_flags =
+ RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ },
+ .eth_dev_init = qedevf_eth_dev_init,
+ .eth_dev_uninit = qedevf_eth_dev_uninit,
+ .dev_private_size = sizeof(struct qede_dev),
+};
+
+static struct eth_driver rte_qede_pmd = {
+ .pci_drv = {
+ .name = "rte_qede_pmd",
+ .id_table = pci_id_qede_map,
+ .drv_flags =
+ RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ },
+ .eth_dev_init = qede_eth_dev_init,
+ .eth_dev_uninit = qede_eth_dev_uninit,
+ .dev_private_size = sizeof(struct qede_dev),
+};
+
+static int
+rte_qedevf_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ rte_eth_driver_register(&rte_qedevf_pmd);
+
+ return 0;
+}
+
+static int
+rte_qede_pmd_init(const char *name __rte_unused,
+ const char *params __rte_unused)
+{
+ rte_eth_driver_register(&rte_qede_pmd);
+
+ return 0;
+}
+
+static struct rte_driver rte_qedevf_driver = {
+ .type = PMD_PDEV,
+	.init = rte_qedevf_pmd_init
+};
+
+static struct rte_driver rte_qede_driver = {
+ .type = PMD_PDEV,
+	.init = rte_qede_pmd_init
+};
+
+PMD_REGISTER_DRIVER(rte_qede_driver);
+PMD_REGISTER_DRIVER(rte_qedevf_driver);
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
new file mode 100644
index 00000000..abb33af1
--- /dev/null
+++ b/drivers/net/qede/qede_ethdev.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+
+#ifndef _QEDE_ETHDEV_H_
+#define _QEDE_ETHDEV_H_
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+
+/* ecore includes */
+#include "base/bcm_osal.h"
+#include "base/ecore.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_l2_api.h"
+#include "base/ecore_vf_api.h"
+#include "base/ecore_hsi_common.h"
+#include "base/ecore_int_api.h"
+#include "base/ecore_chain.h"
+#include "base/ecore_status.h"
+#include "base/ecore_hsi_eth.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_iov_api.h"
+
+#include "qede_logs.h"
+#include "qede_if.h"
+#include "qede_eth_if.h"
+
+#include "qede_rxtx.h"
+
+#define qede_stringify1(x...) #x
+#define qede_stringify(x...) qede_stringify1(x)
+
+/* Driver versions */
+#define QEDE_PMD_VER_PREFIX "QEDE PMD"
+#define QEDE_PMD_VERSION_MAJOR 1
+#define QEDE_PMD_VERSION_MINOR 1
+#define QEDE_PMD_VERSION_REVISION 0
+#define QEDE_PMD_VERSION_PATCH 1
+
+#define QEDE_MAJOR_VERSION 8
+#define QEDE_MINOR_VERSION 7
+#define QEDE_REVISION_VERSION 9
+#define QEDE_ENGINEERING_VERSION 0
+
+#define QEDE_DRV_MODULE_VERSION qede_stringify(QEDE_MAJOR_VERSION) "." \
+ qede_stringify(QEDE_MINOR_VERSION) "." \
+ qede_stringify(QEDE_REVISION_VERSION) "." \
+ qede_stringify(QEDE_ENGINEERING_VERSION)
+
+#define QEDE_RSS_INDIR_INITED (1 << 0)
+#define QEDE_RSS_KEY_INITED (1 << 1)
+#define QEDE_RSS_CAPS_INITED (1 << 2)
+
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
+ (edev)->dev_info.num_tc)
+
+#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
+#define QEDE_TSS_CNT(edev) ((edev)->num_rss * (edev)->num_tc)
+
+#define QEDE_DUPLEX_FULL 1
+#define QEDE_DUPLEX_HALF 2
+#define QEDE_DUPLEX_UNKNOWN 0xff
+
+#define QEDE_SUPPORTED_AUTONEG (1 << 6)
+#define QEDE_SUPPORTED_PAUSE (1 << 13)
+
+#define QEDE_INIT_QDEV(eth_dev) (eth_dev->data->dev_private)
+
+#define QEDE_INIT_EDEV(adapter) (&((struct qede_dev *)adapter)->edev)
+
+#define QEDE_INIT(eth_dev) { \
+ struct qede_dev *qdev = eth_dev->data->dev_private; \
+ struct ecore_dev *edev = &qdev->edev; \
+}
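+
+/*
+ * Note: the qdev/edev declarations above are scoped to the macro's own
+ * braces, so QEDE_INIT only helps when the enclosed block consumes them.
+ * Call sites in this driver typically open-code the equivalent:
+ *
+ *	struct qede_dev *qdev = eth_dev->data->dev_private;
+ *	struct ecore_dev *edev = &qdev->edev;
+ */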
+
+/************* QLogic 25G/40G/100G vendor/devices ids *************/
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+
+#define CHIP_NUM_57980E 0x1634
+#define CHIP_NUM_57980S 0x1629
+#define CHIP_NUM_VF 0x1630
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_57980S_100 0x1644
+
+#define PCI_DEVICE_ID_NX2_57980E CHIP_NUM_57980E
+#define PCI_DEVICE_ID_NX2_57980S CHIP_NUM_57980S
+#define PCI_DEVICE_ID_NX2_VF CHIP_NUM_VF
+#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
+
+extern char fw_file[];
+
+/* Port/function states */
+enum dev_state {
+ QEDE_START,
+ QEDE_STOP,
+ QEDE_CLOSE
+};
+
+struct qed_int_param {
+ uint32_t int_mode;
+ uint8_t num_vectors;
+ uint8_t min_msix_cnt;
+};
+
+struct qed_int_params {
+ struct qed_int_param in;
+ struct qed_int_param out;
+ bool fp_initialized;
+};
+
+/*
+ * Structure to store private data for each port.
+ */
+struct qede_dev {
+ struct ecore_dev edev;
+ uint8_t protocol;
+ const struct qed_eth_ops *ops;
+ struct qed_dev_eth_info dev_info;
+ struct ecore_sb_info *sb_array;
+ struct qede_fastpath *fp_array;
+ uint16_t num_rss;
+ uint8_t num_tc;
+ uint16_t mtu;
+ bool rss_enabled;
+ struct qed_update_vport_rss_params rss_params;
+ uint32_t flags;
+ bool gro_disable;
+ struct qede_rx_queue **rx_queues;
+ struct qede_tx_queue **tx_queues;
+ enum dev_state state;
+
+ /* Vlans */
+ osal_list_t vlan_list;
+ uint16_t configured_vlans;
+ uint16_t non_configured_vlans;
+ bool accept_any_vlan;
+ uint16_t vxlan_dst_port;
+
+ struct ether_addr primary_mac;
+ bool handle_hw_err;
+ char drv_ver[QED_DRV_VER_STR_SIZE];
+};
+
+int qed_fill_eth_dev_info(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
+int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
+void qede_config_rx_mode(struct rte_eth_dev *eth_dev);
+
+#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
new file mode 100644
index 00000000..1b05ff86
--- /dev/null
+++ b/drivers/net/qede/qede_if.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_IF_H
+#define _QEDE_IF_H
+
+#include "qede_ethdev.h"
+
+/* forward */
+struct ecore_dev;
+struct qed_sb_info;
+struct qed_pf_params;
+enum ecore_int_mode;
+
+struct qed_dev_info {
+ uint8_t num_hwfns;
+ uint8_t hw_mac[ETHER_ADDR_LEN];
+ bool is_mf_default;
+
+ /* FW version */
+ uint16_t fw_major;
+ uint16_t fw_minor;
+ uint16_t fw_rev;
+ uint16_t fw_eng;
+
+ /* MFW version */
+ uint32_t mfw_rev;
+
+ uint32_t flash_size;
+ uint8_t mf_mode;
+ bool tx_switching;
+ /* To be added... */
+};
+
+enum qed_sb_type {
+ QED_SB_TYPE_L2_QUEUE,
+ QED_SB_TYPE_STORAGE,
+ QED_SB_TYPE_CNQ,
+};
+
+enum qed_protocol {
+ QED_PROTOCOL_ETH,
+};
+
+struct qed_link_params {
+ bool link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG (1 << 0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS (1 << 1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED (1 << 2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG (1 << 3)
+ uint32_t override_flags;
+ bool autoneg;
+ uint32_t adv_speeds;
+ uint32_t forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE (1 << 0)
+#define QED_LINK_PAUSE_RX_ENABLE (1 << 1)
+#define QED_LINK_PAUSE_TX_ENABLE (1 << 2)
+ uint32_t pause_config;
+};
+
+struct qed_link_output {
+ bool link_up;
+ uint32_t supported_caps; /* In SUPPORTED defs */
+ uint32_t advertised_caps; /* In ADVERTISED defs */
+ uint32_t lp_caps; /* In ADVERTISED defs */
+ uint32_t speed; /* In Mb/s */
+ uint8_t duplex; /* In DUPLEX defs */
+ uint8_t port; /* In PORT defs */
+ bool autoneg;
+ uint32_t pause_config;
+};
+
+#define QED_DRV_VER_STR_SIZE 80
+struct qed_slowpath_params {
+ uint32_t int_mode;
+ uint8_t drv_major;
+ uint8_t drv_minor;
+ uint8_t drv_rev;
+ uint8_t drv_eng;
+ uint8_t name[QED_DRV_VER_STR_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+
+struct qed_common_cb_ops {
+ void (*link_update)(void *dev, struct qed_link_output *link);
+};
+
+struct qed_selftest_ops {
+/**
+ * @brief registers - Perform register tests
+ *
+ * @param edev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*registers)(struct ecore_dev *edev);
+};
+
+struct qed_common_ops {
+ int (*probe)(struct ecore_dev *edev,
+ struct rte_pci_device *pci_dev,
+ enum qed_protocol protocol,
+ uint32_t dp_module, uint8_t dp_level, bool is_vf);
+ void (*set_id)(struct ecore_dev *edev,
+ char name[], const char ver_str[]);
+ enum _ecore_status_t (*chain_alloc)(struct ecore_dev *edev,
+ enum ecore_chain_use_mode
+ intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ uint32_t num_elems,
+ osal_size_t elem_size,
+ struct ecore_chain *p_chain);
+
+ void (*chain_free)(struct ecore_dev *edev,
+ struct ecore_chain *p_chain);
+
+ void (*get_link)(struct ecore_dev *edev,
+ struct qed_link_output *if_link);
+ int (*set_link)(struct ecore_dev *edev,
+ struct qed_link_params *params);
+
+ int (*drain)(struct ecore_dev *edev);
+
+ void (*remove)(struct ecore_dev *edev);
+
+ int (*slowpath_stop)(struct ecore_dev *edev);
+
+ void (*update_pf_params)(struct ecore_dev *edev,
+ struct ecore_pf_params *params);
+
+ int (*slowpath_start)(struct ecore_dev *edev,
+ struct qed_slowpath_params *params);
+
+ int (*set_fp_int)(struct ecore_dev *edev, uint16_t cnt);
+
+ uint32_t (*sb_init)(struct ecore_dev *edev,
+ struct ecore_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ uint16_t sb_id, enum qed_sb_type type);
+
+ bool (*can_link_change)(struct ecore_dev *edev);
+ void (*update_msglvl)(struct ecore_dev *edev,
+ uint32_t dp_module, uint8_t dp_level);
+};
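+
+/*
+ * Rough call order for these hooks as exercised elsewhere in this patch
+ * (a sketch, not a contract): probe -> update_pf_params -> slowpath_start,
+ * then per-queue sb_init and per-ring chain_alloc, get_link/set_link at
+ * runtime, and slowpath_stop -> remove on teardown.
+ */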
+
+/**
+ * @brief qed_get_protocol_version
+ *
+ * @param protocol
+ *
+ * @return version supported by qed for given protocol driver
+ */
+uint32_t qed_get_protocol_version(enum qed_protocol protocol);
+
+#endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
new file mode 100644
index 00000000..45c4af09
--- /dev/null
+++ b/drivers/net/qede/qede_logs.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _QEDE_LOGS_H_
+#define _QEDE_LOGS_H_
+
+#define DP_ERR(p_dev, fmt, ...) \
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__)
+
+#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
+ rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+
+#define DP_INFO(p_dev, fmt, ...) \
+ rte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__)
+#else
+#define DP_INFO(p_dev, fmt, ...) do { } while (0)
+
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+#define DP_VERBOSE(p_dev, module, fmt, ...) \
+do { \
+ if ((p_dev)->dp_module & module) \
+ rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_PMD, \
+ "[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ ##__VA_ARGS__); \
+} while (0)
+#else
+#define DP_VERBOSE(p_dev, module, fmt, ...) do { } while (0)
+#endif
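+
+/*
+ * Illustrative use (the module flag name is assumed from the ecore debug
+ * levels): with RTE_LIBRTE_QEDE_DEBUG_DRIVER set,
+ *
+ *	DP_VERBOSE(edev, ECORE_MSG_SPQ, "ramrod rc=%d\n", rc);
+ *
+ * logs at RTE_LOG_DEBUG only when that bit is set in edev->dp_module;
+ * otherwise the macro compiles away entirely.
+ */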
+
+#define PMD_INIT_LOG(level, edev, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "[qede_pmd: %s] %s() " fmt "\n", \
+ (edev)->name, __func__, ##args)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE(edev) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+#define PMD_TX_LOG(level, q, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
+ __func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_TX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+#define PMD_RX_LOG(level, q, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
+ __func__, q->port_id, q->queue_id, ## args)
+#else
+#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _QEDE_LOGS_H_ */
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
new file mode 100644
index 00000000..73608c69
--- /dev/null
+++ b/drivers/net/qede/qede_main.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <zlib.h>
+#include <limits.h>
+#include <rte_alarm.h>
+
+#include "qede_ethdev.h"
+
+static uint8_t npar_tx_switching = 1;
+
+/* Alarm timeout. */
+#define QEDE_ALARM_TIMEOUT_US 100000
+
+#define CONFIG_QED_BINARY_FW
+/* Global variable to hold absolute path of fw file */
+char fw_file[PATH_MAX];
+
+const char *QEDE_DEFAULT_FIRMWARE =
+ "/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin";
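+
+/*
+ * RTE_LIBRTE_QEDE_FW is expected to come from the DPDK build config
+ * (CONFIG_RTE_LIBRTE_QEDE_FW, empty by default); when it is left empty,
+ * the default zipped-firmware path above is used instead.
+ */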
+
+static void
+qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
+{
+ int i;
+
+ for (i = 0; i < edev->num_hwfns; i++) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+ p_hwfn->pf_params = *params;
+ }
+}
+
+static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
+{
+ edev->regview = pci_dev->mem_resource[0].addr;
+ edev->doorbells = pci_dev->mem_resource[2].addr;
+}
+
+static int
+qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
+ enum qed_protocol protocol, uint32_t dp_module,
+ uint8_t dp_level, bool is_vf)
+{
+ struct qede_dev *qdev = (struct qede_dev *)edev;
+ int rc;
+
+ ecore_init_struct(edev);
+ qdev->protocol = protocol;
+ if (is_vf) {
+ edev->b_is_vf = true;
+ edev->sriov_info.b_hw_channel = true;
+ }
+ ecore_init_dp(edev, dp_module, dp_level, NULL);
+ qed_init_pci(edev, pci_dev);
+ rc = ecore_hw_prepare(edev, ECORE_PCI_DEFAULT);
+ if (rc) {
+ DP_ERR(edev, "hw prepare failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int qed_nic_setup(struct ecore_dev *edev)
+{
+	int rc;
+
+ rc = ecore_resc_alloc(edev);
+ if (rc)
+ return rc;
+
+ DP_INFO(edev, "Allocated qed resources\n");
+ ecore_resc_setup(edev);
+
+ return rc;
+}
+
+static int qed_alloc_stream_mem(struct ecore_dev *edev)
+{
+ int i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_hwfn->stream));
+ if (!p_hwfn->stream)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void qed_free_stream_mem(struct ecore_dev *edev)
+{
+ int i;
+
+ for_each_hwfn(edev, i) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ if (!p_hwfn->stream)
+ return;
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
+ }
+}
+
+static int qed_load_firmware_data(struct ecore_dev *edev)
+{
+ int fd;
+ struct stat st;
+ const char *fw = RTE_LIBRTE_QEDE_FW;
+
+ if (strcmp(fw, "") == 0)
+ strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
+ else
+ strcpy(fw_file, fw);
+
+ fd = open(fw_file, O_RDONLY);
+ if (fd < 0) {
+ DP_NOTICE(edev, false, "Can't open firmware file\n");
+ return -ENOENT;
+ }
+
+	if (fstat(fd, &st) < 0) {
+		DP_NOTICE(edev, false, "Can't stat firmware file\n");
+		close(fd);
+		return -1;
+	}
+
+ edev->firmware = rte_zmalloc("qede_fw", st.st_size,
+ RTE_CACHE_LINE_SIZE);
+ if (!edev->firmware) {
+ DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
+ close(fd);
+ return -ENOMEM;
+ }
+
+ if (read(fd, edev->firmware, st.st_size) != st.st_size) {
+ DP_NOTICE(edev, false, "Can't read firmware data\n");
+ close(fd);
+ return -1;
+	}
+
+	close(fd);
+
+ edev->fw_len = st.st_size;
+ if (edev->fw_len < 104) {
+ DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
+ edev->fw_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn)
+{
+ uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+
+ is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced)
+ rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN);
+
+ /* Always update link configuration according to bulletin */
+ qed_link_update(hwfn);
+}
+
+static void qede_vf_task(void *arg)
+{
+ struct ecore_hwfn *p_hwfn = arg;
+ uint8_t change = 0;
+
+ /* Read the bulletin board, and re-schedule the task */
+ ecore_vf_read_bulletin(p_hwfn, &change);
+ if (change)
+ qed_handle_bulletin_change(p_hwfn);
+
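+	/* Re-arm: the task keeps polling the VF bulletin every 100 ms
+	 * (QEDE_ALARM_TIMEOUT_US) until qed_stop_iov_task() cancels it.
+	 */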
+ rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn);
+}
+
+static void qed_start_iov_task(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (!IS_PF(edev))
+ rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task,
+ p_hwfn);
+ }
+}
+
+static void qed_stop_iov_task(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (!IS_PF(edev))
+ rte_eal_alarm_cancel(qede_vf_task, p_hwfn);
+ }
+}
+
+static int qed_slowpath_start(struct ecore_dev *edev,
+ struct qed_slowpath_params *params)
+{
+ bool allow_npar_tx_switching;
+ const uint8_t *data = NULL;
+ struct ecore_hwfn *hwfn;
+ struct ecore_mcp_drv_version drv_version;
+ struct qede_dev *qdev = (struct qede_dev *)edev;
+ int rc;
+#ifdef QED_ENC_SUPPORTED
+ struct ecore_tunn_start_params tunn_info;
+#endif
+
+#ifdef CONFIG_QED_BINARY_FW
+ if (IS_PF(edev)) {
+ rc = qed_load_firmware_data(edev);
+ if (rc) {
+ DP_NOTICE(edev, true,
+ "Failed to find fw file %s\n", fw_file);
+ goto err;
+ }
+ }
+#endif
+
+ rc = qed_nic_setup(edev);
+ if (rc)
+ goto err;
+
+ /* set int_coalescing_mode */
+ edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
+
+ /* Should go with CONFIG_QED_BINARY_FW */
+ if (IS_PF(edev)) {
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(edev);
+ if (rc) {
+ DP_NOTICE(edev, true,
+ "Failed to allocate stream memory\n");
+ goto err2;
+ }
+ }
+
+ qed_start_iov_task(edev);
+
+ /* Start the slowpath */
+#ifdef CONFIG_QED_BINARY_FW
+ if (IS_PF(edev))
+ data = edev->firmware;
+#endif
+ allow_npar_tx_switching = npar_tx_switching ? true : false;
+
+#ifdef QED_ENC_SUPPORTED
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
+ 1 << QED_MODE_L2GRE_TUNN |
+ 1 << QED_MODE_IPGRE_TUNN |
+ 1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN;
+ tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
+ rc = ecore_hw_init(edev, &tunn_info, true, ECORE_INT_MODE_MSIX,
+ allow_npar_tx_switching, data);
+#else
+ rc = ecore_hw_init(edev, NULL, true, ECORE_INT_MODE_MSIX,
+ allow_npar_tx_switching, data);
+#endif
+ if (rc) {
+ DP_ERR(edev, "ecore_hw_init failed\n");
+ goto err2;
+ }
+
+ DP_INFO(edev, "HW inited and function started\n");
+
+ if (IS_PF(edev)) {
+ hwfn = ECORE_LEADING_HWFN(edev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) | (params->drv_eng);
+ /* TBD: strlcpy() */
+ strncpy((char *)drv_version.name, (const char *)params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(edev, true,
+ "Failed sending drv version command\n");
+ return rc;
+ }
+ }
+
+ ecore_reset_vport_stats(edev);
+
+ return 0;
+
+err2:
+ ecore_resc_free(edev);
+err:
+#ifdef CONFIG_QED_BINARY_FW
+ if (IS_PF(edev)) {
+ if (edev->firmware)
+ rte_free(edev->firmware);
+ edev->firmware = NULL;
+ }
+#endif
+ qed_stop_iov_task(edev);
+
+ return rc;
+}
+
+static int
+qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
+{
+ struct ecore_ptt *ptt = NULL;
+
+ memset(dev_info, 0, sizeof(struct qed_dev_info));
+ dev_info->num_hwfns = edev->num_hwfns;
+ dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+ rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+ ETHER_ADDR_LEN);
+
+ if (IS_PF(edev)) {
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = edev->mf_mode;
+ dev_info->tx_switching = false;
+ } else {
+ ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
+ &dev_info->fw_minor, &dev_info->fw_rev,
+ &dev_info->fw_eng);
+ }
+
+ if (IS_PF(edev)) {
+ ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
+ if (ptt) {
+ ecore_mcp_get_mfw_ver(edev, ptt,
+ &dev_info->mfw_rev, NULL);
+
+ ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,
+ &dev_info->flash_size);
+
+ /* Workaround to allow PHY-read commands for
+ * B0 bringup.
+ */
+ if (ECORE_IS_BB_B0(edev))
+ dev_info->flash_size = 0xffffffff;
+
+ ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
+ }
+ } else {
+ ecore_mcp_get_mfw_ver(edev, ptt, &dev_info->mfw_rev, NULL);
+ }
+
+ return 0;
+}
+
+int
+qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
+{
+ struct qede_dev *qdev = (struct qede_dev *)edev;
+ int i;
+
+ memset(info, 0, sizeof(*info));
+
+ info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;
+
+ if (IS_PF(edev)) {
+ info->num_queues = 0;
+ for_each_hwfn(edev, i)
+ info->num_queues +=
+ FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
+
+ info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);
+
+ rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
+ ETHER_ADDR_LEN);
+ } else {
+ ecore_vf_get_num_rxqs(&edev->hwfns[0], &info->num_queues);
+
+ ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
+ &info->num_vlan_filters);
+
+ ecore_vf_get_port_mac(&edev->hwfns[0],
+ (uint8_t *)&info->port_mac);
+ }
+
+ qed_fill_dev_info(edev, &info->common);
+
+ if (IS_VF(edev))
+ memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);
+
+ return 0;
+}
+
+static void
+qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
+ const char ver_str[VER_SIZE])
+{
+ int i;
+
+ rte_memcpy(edev->name, name, NAME_SIZE);
+ for_each_hwfn(edev, i) {
+ snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+ }
+ rte_memcpy(edev->ver_str, ver_str, VER_SIZE);
+ edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static uint32_t
+qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
+ void *sb_virt_addr, dma_addr_t sb_phy_addr,
+ uint16_t sb_id, enum qed_sb_type type)
+{
+ struct ecore_hwfn *p_hwfn;
+ int hwfn_index;
+ uint16_t rel_sb_id;
+ uint8_t n_hwfns;
+ uint32_t rc;
+
+ /* RoCE uses single engine and CMT uses two engines. When using both
+ * we force only a single engine. Storage uses only engine 0 too.
+ */
+ if (type == QED_SB_TYPE_L2_QUEUE)
+ n_hwfns = edev->num_hwfns;
+ else
+ n_hwfns = 1;
+
+ hwfn_index = sb_id % n_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rel_sb_id = sb_id / n_hwfns;
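+	/* Worked example: with 2 hwfns and an L2 queue, sb_id 5 lands on
+	 * hwfn_index 1 (5 % 2) with rel_sb_id 2 (5 / 2), interleaving
+	 * status blocks across the engines.
+	 */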
+
+ DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ hwfn_index, rel_sb_id, sb_id);
+
+ rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+ sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+ return rc;
+}
+
+static void qed_fill_link(struct ecore_hwfn *hwfn,
+ struct qed_link_output *if_link)
+{
+ struct ecore_mcp_link_params params;
+ struct ecore_mcp_link_state link;
+ struct ecore_mcp_link_capabilities link_caps;
+ uint32_t media_type;
+ uint8_t change = 0;
+
+ memset(if_link, 0, sizeof(*if_link));
+
+ /* Prepare source inputs */
+ if (IS_PF(hwfn->p_dev)) {
+ rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
+ sizeof(params));
+ rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
+ rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
+ sizeof(link_caps));
+ } else {
+ ecore_vf_read_bulletin(hwfn, &change);
+ ecore_vf_get_link_params(hwfn, &params);
+ ecore_vf_get_link_state(hwfn, &link);
+ ecore_vf_get_link_caps(hwfn, &link_caps);
+ }
+
+ /* Set the link parameters to pass to protocol driver */
+ if (link.link_up)
+ if_link->link_up = true;
+
+ if (link.link_up)
+ if_link->speed = link.speed;
+
+ if_link->duplex = QEDE_DUPLEX_FULL;
+
+ if (params.speed.autoneg)
+ if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;
+
+ if (params.pause.autoneg || params.pause.forced_rx ||
+ params.pause.forced_tx)
+ if_link->supported_caps |= QEDE_SUPPORTED_PAUSE;
+
+ if (params.pause.autoneg)
+ if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+
+ if (params.pause.forced_rx)
+ if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+
+ if (params.pause.forced_tx)
+ if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+}
+
+static void
+qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
+{
+ qed_fill_link(&edev->hwfns[0], if_link);
+
+#ifdef CONFIG_QED_SRIOV
+	int i;
+
+	for_each_hwfn(edev, i)
+		qed_inform_vf_link_state(&edev->hwfns[i]);
+#endif
+}
+
+static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
+{
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
+ struct ecore_mcp_link_params *link_params;
+ int rc;
+
+ if (IS_VF(edev))
+ return 0;
+
+ /* The link should be set only once per PF */
+ hwfn = &edev->hwfns[0];
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ link_params = ecore_mcp_get_link_params(hwfn);
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ link_params->speed.autoneg = params->autoneg;
+
+ if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+ if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ link_params->pause.autoneg = true;
+ else
+ link_params->pause.autoneg = false;
+ if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ link_params->pause.forced_rx = true;
+ else
+ link_params->pause.forced_rx = false;
+ if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ link_params->pause.forced_tx = true;
+ else
+ link_params->pause.forced_tx = false;
+ }
+
+ rc = ecore_mcp_set_link(hwfn, ptt, params->link_up);
+
+ ecore_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+void qed_link_update(struct ecore_hwfn *hwfn)
+{
+ struct qed_link_output if_link;
+
+ qed_fill_link(hwfn, &if_link);
+}
+
+static int qed_drain(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
+ int i, rc;
+
+ if (IS_VF(edev))
+ return 0;
+
+ for_each_hwfn(edev, i) {
+ hwfn = &edev->hwfns[i];
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
+ return -EBUSY;
+ }
+		rc = ecore_mcp_drain(hwfn, ptt);
+		if (rc) {
+			ecore_ptt_release(hwfn, ptt);
+			return rc;
+		}
+ ecore_ptt_release(hwfn, ptt);
+ }
+
+ return 0;
+}
+
+static int qed_nic_stop(struct ecore_dev *edev)
+{
+ int i, rc;
+
+ rc = ecore_hw_stop(edev);
+ for (i = 0; i < edev->num_hwfns; i++) {
+ struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
+
+ if (p_hwfn->b_sp_dpc_enabled)
+ p_hwfn->b_sp_dpc_enabled = false;
+ }
+ return rc;
+}
+
+static int qed_nic_reset(struct ecore_dev *edev)
+{
+ int rc;
+
+ rc = ecore_hw_reset(edev);
+ if (rc)
+ return rc;
+
+ ecore_resc_free(edev);
+
+ return 0;
+}
+
+static int qed_slowpath_stop(struct ecore_dev *edev)
+{
+#ifdef CONFIG_QED_SRIOV
+ int i;
+#endif
+
+ if (!edev)
+ return -ENODEV;
+
+ if (IS_PF(edev)) {
+ qed_free_stream_mem(edev);
+
+#ifdef CONFIG_QED_SRIOV
+ if (IS_QED_ETH_IF(edev))
+ qed_sriov_disable(edev, true);
+#endif
+ qed_nic_stop(edev);
+ }
+
+ qed_nic_reset(edev);
+ qed_stop_iov_task(edev);
+
+ return 0;
+}
+
+static void qed_remove(struct ecore_dev *edev)
+{
+ if (!edev)
+ return;
+
+ ecore_hw_remove(edev);
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+ INIT_STRUCT_FIELD(probe, &qed_probe),
+ INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
+ INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
+ INIT_STRUCT_FIELD(set_id, &qed_set_id),
+ INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
+ INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
+ INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
+ INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
+ INIT_STRUCT_FIELD(set_link, &qed_set_link),
+ INIT_STRUCT_FIELD(drain, &qed_drain),
+ INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
+ INIT_STRUCT_FIELD(remove, &qed_remove),
+};
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
new file mode 100644
index 00000000..b5a40fe4
--- /dev/null
+++ b/drivers/net/qede/qede_rxtx.c
@@ -0,0 +1,1389 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "qede_rxtx.h"
+
+static bool gro_disable = true;	/* mod_param */
+
+static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+ struct rte_mbuf *new_mb = NULL;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
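+	/* The prod counter wraps freely and is masked into a ring index;
+	 * this relies on nb_desc being a power of 2, which
+	 * qede_rx_queue_setup() below enforces.
+	 */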
+
+ new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!new_mb)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Failed to allocate rx buffer "
+ "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
+ idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+ rte_mempool_avail_count(rxq->mb_pool),
+ rte_mempool_in_use_count(rxq->mb_pool));
+ return -ENOMEM;
+ }
+ rxq->sw_rx_ring[idx].mbuf = new_mb;
+ rxq->sw_rx_ring[idx].page_offset = 0;
+ mapping = rte_mbuf_data_dma_addr_default(new_mb);
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
+ rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+ rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+ rxq->sw_rx_prod++;
+ return 0;
+}
+
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (rxq->sw_rx_ring != NULL) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_rx_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+ rxq->sw_rx_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+ struct qede_rx_queue *rxq = rx_queue;
+
+ if (rxq != NULL) {
+ qede_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_rx_ring);
+ rxq->sw_rx_ring = NULL;
+		rte_free(rxq);
+ }
+}
+
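+/*
+ * Rx queue setup, reached through the generic ethdev API. An
+ * application-side sketch (names and sizes hypothetical):
+ *
+ *	struct rte_eth_rxconf rx_conf = { 0 };
+ *
+ *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
+ *			       &rx_conf, mbuf_pool);
+ *
+ * nb_desc must be a power of 2 and max_rx_pkt_len must fit within the
+ * mempool's data room, as checked below.
+ */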
+int
+qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct rte_eth_dev_data *eth_data = dev->data;
+ struct qede_rx_queue *rxq;
+ uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ size_t size;
+ uint16_t data_size;
+ int rc;
+ int i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
+ if (!rte_is_power_of_2(nb_desc)) {
+ DP_ERR(edev, "Ring size %u is not power of 2\n",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!rxq) {
+ DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ rxq->qdev = qdev;
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+
+ /* Sanity check */
+ data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
+ RTE_PKTMBUF_HEADROOM;
+
+ if (pkt_len > data_size) {
+ DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
+ pkt_len, data_size);
+ rte_free(rxq);
+ return -EINVAL;
+ }
+
+ qdev->mtu = pkt_len;
+ rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
+
+ DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
+ qdev->mtu, rxq->rx_buf_size);
+
+ if (pkt_len > ETHER_MAX_LEN) {
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ DP_NOTICE(edev, false, "jumbo frame enabled\n");
+ } else {
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ }
+
+ /* Allocate the parallel driver ring for Rx buffers */
+ size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
+ rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_rx_ring) {
+ DP_NOTICE(edev, false,
+ "Unable to alloc memory for sw_rx_ring on socket %u\n",
+ socket_id);
+ rte_free(rxq);
+ rxq = NULL;
+ return -ENOMEM;
+ }
+
+ /* Allocate FW Rx ring */
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+ ECORE_CHAIN_MODE_NEXT_PTR,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ rxq->nb_rx_desc,
+ sizeof(struct eth_rx_bd),
+ &rxq->rx_bd_ring);
+
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(edev, false,
+			  "Unable to alloc memory for rxbd ring on socket %u\n",
+			  socket_id);
+		rte_free(rxq->sw_rx_ring);
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+ /* Allocate FW completion ring */
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ rxq->nb_rx_desc,
+ sizeof(union eth_rx_cqe),
+ &rxq->rx_comp_ring);
+
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(edev, false,
+			  "Unable to alloc memory for cqe ring on socket %u\n",
+			  socket_id);
+		/* TBD: Freeing RX BD ring */
+		rte_free(rxq->sw_rx_ring);
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+ /* Allocate buffers for the Rx ring */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ rc = qede_alloc_rx_buffer(rxq);
+ if (rc) {
+ DP_NOTICE(edev, false,
+ "RX buffer allocation failed at idx=%d\n", i);
+ goto err4;
+ }
+ }
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ if (!qdev->rx_queues)
+ qdev->rx_queues = (struct qede_rx_queue **)dev->data->rx_queues;
+
+	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
+		  queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
+
+ return 0;
+err4:
+ qede_rx_queue_release(rxq);
+ return -ENOMEM;
+}
+
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
+{
+ unsigned int i;
+
+ PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
+
+ if (txq->sw_tx_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_tx_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+ txq->sw_tx_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+void qede_tx_queue_release(void *tx_queue)
+{
+ struct qede_tx_queue *txq = tx_queue;
+
+ if (txq != NULL) {
+ qede_tx_queue_release_mbufs(txq);
+ if (txq->sw_tx_ring) {
+ rte_free(txq->sw_tx_ring);
+ txq->sw_tx_ring = NULL;
+ }
+		rte_free(txq);
+	}
+}
+
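+/*
+ * Tx queue setup, the rte_eth_tx_queue_setup() counterpart of the Rx
+ * path above; nb_desc carries the same power-of-2 constraint.
+ */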
+int
+qede_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qede_tx_queue *txq;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ DP_ERR(edev, "Ring size %u is not power of 2\n",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (txq == NULL) {
+ DP_ERR(edev,
+ "Unable to allocate memory for txq on socket %u",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->qdev = qdev;
+ txq->port_id = dev->data->port_id;
+
+ rc = qdev->ops->common->chain_alloc(edev,
+ ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+ ECORE_CHAIN_MODE_PBL,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ txq->nb_tx_desc,
+ sizeof(union eth_tx_bd_types),
+ &txq->tx_pbl);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev,
+ "Unable to allocate memory for txbd ring on socket %u",
+ socket_id);
+ qede_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate software ring */
+ txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
+ (sizeof(struct qede_tx_entry) *
+ txq->nb_tx_desc),
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!txq->sw_tx_ring) {
+ DP_ERR(edev,
+		       "Unable to allocate memory for tx sw ring on socket %u",
+ socket_id);
+ qede_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->queue_id = queue_idx;
+
+ txq->nb_tx_avail = txq->nb_tx_desc;
+
+ txq->tx_free_thresh =
+ tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
+ (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
+
+ dev->data->tx_queues[queue_idx] = txq;
+ if (!qdev->tx_queues)
+ qdev->tx_queues = (struct qede_tx_queue **)dev->data->tx_queues;
+
+ txq->txq_counter = 0;
+
+ DP_INFO(edev,
+ "txq %u num_desc %u tx_free_thresh %u socket %u\n",
+ queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
+
+ return 0;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
+static void qede_init_fp(struct qede_dev *qdev)
+{
+ struct qede_fastpath *fp;
+ int rss_id, txq_index, tc;
+
+ memset((void *)qdev->fp_array, 0, (QEDE_RSS_CNT(qdev) *
+ sizeof(*qdev->fp_array)));
+ memset((void *)qdev->sb_array, 0, (QEDE_RSS_CNT(qdev) *
+ sizeof(*qdev->sb_array)));
+ for_each_rss(rss_id) {
+ fp = &qdev->fp_array[rss_id];
+
+ fp->qdev = qdev;
+ fp->rss_id = rss_id;
+
+		/* Point rxq to the generic rte queues that were created
+		 * as part of queue creation.
+		 */
+		fp->rxq = qdev->rx_queues[rss_id];
+		fp->sb_info = &qdev->sb_array[rss_id];
+
+		for (tc = 0; tc < qdev->num_tc; tc++) {
+			txq_index = tc * QEDE_RSS_CNT(qdev) + rss_id;
+			fp->txqs[tc] = qdev->tx_queues[txq_index];
+			fp->txqs[tc]->queue_id = txq_index;
+		}
+
+		/* Record the fastpath name in the main structure */
+		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+			 "qdev", rss_id);
+ }
+
+ qdev->gro_disable = gro_disable;
+}
+
+void qede_free_fp_arrays(struct qede_dev *qdev)
+{
+	/* It assumes qede_free_mem_load() is called before */
+ if (qdev->fp_array != NULL) {
+ rte_free(qdev->fp_array);
+ qdev->fp_array = NULL;
+ }
+
+ if (qdev->sb_array != NULL) {
+ rte_free(qdev->sb_array);
+ qdev->sb_array = NULL;
+ }
+}
+
+int qede_alloc_fp_array(struct qede_dev *qdev)
+{
+ struct qede_fastpath *fp;
+ struct ecore_dev *edev = &qdev->edev;
+ int i;
+
+ qdev->fp_array = rte_calloc("fp", QEDE_RSS_CNT(qdev),
+ sizeof(*qdev->fp_array),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!qdev->fp_array) {
+ DP_ERR(edev, "fp array allocation failed\n");
+ return -ENOMEM;
+ }
+
+ qdev->sb_array = rte_calloc("sb", QEDE_RSS_CNT(qdev),
+ sizeof(*qdev->sb_array),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!qdev->sb_array) {
+ DP_ERR(edev, "sb array allocation failed\n");
+ rte_free(qdev->fp_array);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* This function allocates fast-path status block memory */
+static int
+qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
+ uint16_t sb_id)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
+
+ if (!sb_virt) {
+ DP_ERR(edev, "Status block allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = qdev->ops->common->sb_init(edev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
+ if (rc) {
+ DP_ERR(edev, "Status block initialization failed\n");
+ /* TBD: No dma_free_coherent possible */
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qede_alloc_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
+{
+ return qede_alloc_mem_sb(qdev, fp->sb_info, fp->rss_id);
+}
+
+static void qede_shrink_txq(struct qede_dev *qdev, uint16_t num_rss)
+{
+ /* @@@TBD - this should also re-set the qed interrupts */
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *qdev)
+{
+ int rc = 0, rss_id;
+ struct ecore_dev *edev = &qdev->edev;
+
+ for (rss_id = 0; rss_id < QEDE_RSS_CNT(qdev); rss_id++) {
+ struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+
+ rc = qede_alloc_mem_fp(qdev, fp);
+ if (rc)
+ break;
+ }
+
+	if (rss_id != QEDE_RSS_CNT(qdev)) {
+		/* Failed allocating memory for all the queues */
+		if (!rss_id) {
+			DP_ERR(edev,
+			       "Failed to alloc memory for leading queue\n");
+			return -ENOMEM;
+		}
+		DP_NOTICE(edev, false,
+			  "Failed to allocate memory for all RSS queues\n"
+			  "Desired: %d queues, allocated: %d queues\n",
+			  QEDE_RSS_CNT(qdev), rss_id);
+		qede_shrink_txq(qdev, rss_id);
+		/* Continue with however many queues were allocated */
+		qdev->num_rss = rss_id;
+	}
+
+	return 0;
+}
+
+static inline void
+qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+ uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+ uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = { 0 };
+
+ /* Update producers */
+ rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
+ rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ rte_wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (uint32_t *)&rx_prods);
+
+	/* This second barrier orders doorbell writes issued from more than
+	 * one core: it guarantees the producer update reaches the device
+	 * before another core can write the next doorbell for this queue.
+	 * (Inherited from the Linux driver, where mmiowb() plays this role
+	 * on IA64/Altix systems.)
+	 */
+ rte_wmb();
+
+ PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);
+}
+
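+/* Default RSS indirection is plain round-robin: with 4 Rx rings the
+ * table reads 0,1,2,3,0,1,2,3,... across all of its entries.
+ */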
+static inline uint32_t
+qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
+{
+ return index % n_rx_rings;
+}
+
+/* Fill buff with pseudo-random 32-bit words. rand() is sufficient here:
+ * the RSS key only needs to vary between runs, not be cryptographically
+ * strong.
+ */
+static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
+{
+	unsigned int i;
+
+	srand((unsigned int)time(NULL));
+
+	for (i = 0; i < bytes / sizeof(uint32_t); i++)
+		buff[i] = rand();
+}
+
+static int
+qede_config_rss(struct rte_eth_dev *eth_dev,
+ struct qed_update_vport_rss_params *rss_params)
+{
+ struct rte_eth_rss_conf rss_conf;
+ enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ uint8_t rss_caps;
+ unsigned int i;
+ uint64_t hf;
+ uint32_t *key;
+
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ key = (uint32_t *)rss_conf.rss_key;
+ hf = rss_conf.rss_hf;
+ PMD_INIT_FUNC_TRACE(edev);
+
+	/* Check if RSS conditions are met.
+	 * Note: Even though it's meaningless to enable RSS with one queue, it
+	 * could still be used to produce an RSS hash, so that check is skipped.
+	 */
+
+ if (!(mode & ETH_MQ_RX_RSS)) {
+ DP_INFO(edev, "RSS flag is not set\n");
+ return -EINVAL;
+ }
+
+ DP_INFO(edev, "RSS flag is set\n");
+
+ if (rss_conf.rss_hf == 0)
+ DP_NOTICE(edev, false, "RSS hash function = 0, disables RSS\n");
+
+ memset(rss_params, 0, sizeof(*rss_params));
+
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
+ rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
+ QEDE_RSS_CNT(qdev));
+
+ /* key and protocols */
+ if (rss_conf.rss_key == NULL)
+ qede_prandom_bytes(rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ else
+ memcpy(rss_params->rss_key, rss_conf.rss_key,
+ rss_conf.rss_key_len);
+
+ rss_caps = 0;
+ rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+ rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+ rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+ rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+ rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+ rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+
+ rss_params->rss_caps = rss_caps;
+
+ DP_INFO(edev, "RSS check passes\n");
+
+ return 0;
+}
+
+static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
+ struct qed_dev_info *qed_info = &qdev->dev_info.common;
+ struct qed_update_vport_params vport_update_params;
+ struct qed_start_vport_params start = { 0 };
+ int vlan_removal_en = 1;
+ int rc, tc, i;
+
+	if (!qdev->num_rss) {
+		DP_ERR(edev,
+		       "Cannot activate vport: "
+		       "there are no Rx queues\n");
+		return -EINVAL;
+	}
+
+ start.remove_inner_vlan = vlan_removal_en;
+ start.gro_enable = !qdev->gro_disable;
+ start.mtu = qdev->mtu;
+ start.vport_id = 0;
+ start.drop_ttl0 = true;
+ start.clear_stats = clear_stats;
+
+ rc = qdev->ops->vport_start(edev, &start);
+ if (rc) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ DP_INFO(edev,
+ "Start vport ramrod passed, vport_id = %d,"
+ " MTU = %d, vlan_removal_en = %d\n",
+ start.vport_id, qdev->mtu, vlan_removal_en);
+
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &qdev->fp_array[i];
+ dma_addr_t p_phys_table;
+ uint16_t page_cnt;
+
+ p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+ page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+
+ ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); /* @DPDK */
+
+ rc = qdev->ops->q_rx_start(edev, i, i, 0,
+ fp->sb_info->igu_sb_id,
+ RX_PI,
+ fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table,
+ page_cnt,
+ &fp->rxq->hw_rxq_prod_addr);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+ return rc;
+ }
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+ qede_update_rx_prod(qdev, fp->rxq);
+
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct qede_tx_queue *txq = fp->txqs[tc];
+ int txq_index = tc * QEDE_RSS_CNT(qdev) + i;
+
+ p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+ rc = qdev->ops->q_tx_start(edev, i, txq_index,
+ 0,
+ fp->sb_info->igu_sb_id,
+ TX_PI(tc),
+ p_phys_table, page_cnt,
+ &txq->doorbell_addr);
+ if (rc) {
+ DP_ERR(edev, "Start txq %u failed %d\n",
+ txq_index, rc);
+ return rc;
+ }
+
+ txq->hw_cons_ptr =
+ &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ }
+ }
+
+ /* Prepare and send the vport enable */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = start.vport_id;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 1;
+
+ /* @DPDK */
+ if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
+ /* TBD: Check SRIOV enabled for VF */
+ vport_update_params.update_tx_switching_flg = 1;
+ vport_update_params.tx_switching_flg = 1;
+ }
+
+ if (!qede_config_rss(eth_dev, rss_params)) {
+ vport_update_params.update_rss_flg = 1;
+
+ qdev->rss_enabled = 1;
+ DP_INFO(edev, "Updating RSS flag\n");
+ } else {
+ qdev->rss_enabled = 0;
+ DP_INFO(edev, "Not Updating RSS flag\n");
+ }
+
+ rte_memcpy(&vport_update_params.rss_params, rss_params,
+ sizeof(*rss_params));
+
+ rc = qdev->ops->vport_update(edev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#ifdef ENC_SUPPORTED
+static bool qede_tunn_exist(uint16_t flag)
+{
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
+}
+
+static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+{
+ uint8_t tcsum = 0;
+ uint16_t csum_flag = 0;
+
+ if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+#else
+static inline uint8_t qede_tunn_exist(uint16_t flag)
+{
+ return 0;
+}
+
+static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+{
+ return 0;
+}
+#endif
+
+static inline uint8_t qede_check_notunn_csum(uint16_t flag)
+{
+ uint8_t csum = 0;
+ uint16_t csum_flag = 0;
+
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ csum = QEDE_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return csum;
+}
+
+static inline uint8_t qede_check_csum(uint16_t flag)
+{
+ if (likely(!qede_tunn_exist(flag)))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ ecore_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
+static inline void
+qede_reuse_page(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+{
+ struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
+ uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ struct qede_rx_entry *curr_prod;
+ dma_addr_t new_mapping;
+
+ curr_prod = &rxq->sw_rx_ring[idx];
+ *curr_prod = *curr_cons;
+
+ new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
+ curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
+ rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
+
+ rxq->sw_rx_prod++;
+}
+
+static inline void
+qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *qdev, uint8_t count)
+{
+ struct qede_rx_entry *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+ qede_reuse_page(qdev, rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+{
+	uint32_t p_type = RTE_PTYPE_UNKNOWN;
+	/* TBD - L4 indications needed ? */
+	uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+			      PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);
+
+	/* protocol = 0 is unknown; 3 means LLC/SNAP over Ethernet */
+	if (protocol == 1)
+		p_type = RTE_PTYPE_L3_IPV4;
+	else if (protocol == 2)
+		p_type = RTE_PTYPE_L3_IPV6;
+
+	return RTE_PTYPE_L2_ETHER | p_type;
+}
+
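+/*
+ * Burst Rx handler, installed as the device's rx_pkt_burst callback and
+ * reached from applications via rte_eth_rx_burst(port_id, queue_id,
+ * rx_pkts, nb_pkts).
+ */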
+uint16_t
+qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct qede_rx_queue *rxq = p_rxq;
+ struct qede_dev *qdev = rxq->qdev;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
+ uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
+ uint16_t rx_pkt = 0;
+ union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ register struct rte_mbuf *rx_mb = NULL;
+ enum eth_rx_cqe_type cqe_type;
+ uint16_t len, pad;
+ uint16_t preload_idx;
+ uint8_t csum_flag;
+ uint16_t parse_flag;
+ enum rss_hash_type htype;
+
+ hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ rte_rmb();
+
+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
+ while (sw_comp_cons != hw_comp_cons) {
+ /* Get the CQE from the completion ring */
+ cqe =
+ (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+			PMD_RX_LOG(DEBUG, rxq, "Got a slowpath CQE\n");
+
+ qdev->ops->eth_cqe_completion(edev, fp->rss_id,
+ (struct eth_slow_path_rx_cqe *)cqe);
+ goto next_cqe;
+ }
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ assert(rx_mb != NULL);
+
+ /* non GRO */
+ fp_cqe = &cqe->fast_path_regular;
+
+ len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+ pad = fp_cqe->placement_offset;
+ assert((len + pad) <= rx_mb->buf_len);
+
+ PMD_RX_LOG(DEBUG, rxq,
+ "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
+ " len = %u, parsing_flags = %d\n",
+ cqe_type, fp_cqe->bitfields,
+ rte_le_to_cpu_16(fp_cqe->vlan_tag),
+ len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
+
+ /* If this is an error packet then drop it */
+ parse_flag =
+ rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
+ csum_flag = qede_check_csum(parse_flag);
+ if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ PMD_RX_LOG(ERR, rxq,
+ "CQE in CONS = %u has error, flags = 0x%x "
+ "dropping incoming packet\n",
+ sw_comp_cons, parse_flag);
+ rxq->rx_hw_errors++;
+ qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
+ goto next_cqe;
+ }
+
+ if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+ PMD_RX_LOG(ERR, rxq,
+				   "New buffer allocation failed, "
+ "dropping incoming packet\n");
+ qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
+ rte_eth_devices[rxq->port_id].
+ data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ break;
+ }
+
+ qede_rx_bd_ring_consume(rxq);
+
+ /* Prefetch next mbuf while processing current one. */
+ preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+
+ if (fp_cqe->bd_num != 1)
+ PMD_RX_LOG(DEBUG, rxq,
+ "Jumbo-over-BD packet not supported\n");
+
+ /* Update MBUF fields */
+ rx_mb->ol_flags = 0;
+ rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
+ rx_mb->nb_segs = 1;
+ rx_mb->data_len = len;
+ rx_mb->pkt_len = len;
+ rx_mb->port = rxq->port_id;
+ rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);
+
+ htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
+ ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+ if (qdev->rss_enabled && htype) {
+ rx_mb->ol_flags |= PKT_RX_RSS_HASH;
+ rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
+ PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
+ rx_mb->hash.rss);
+ }
+
+ rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
+
+ if (CQE_HAS_VLAN(parse_flag)) {
+ rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+ rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+ }
+
+ if (CQE_HAS_OUTER_VLAN(parse_flag)) {
+ /* FW does not provide indication of Outer VLAN tag,
+ * which is always stripped, so vlan_tci_outer is set
+ * to 0. Here vlan_tag represents inner VLAN tag.
+ */
+ rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+ rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
+ }
+
+ rx_pkts[rx_pkt] = rx_mb;
+ rx_pkt++;
+next_cqe:
+ ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+ if (rx_pkt == nb_pkts) {
+ PMD_RX_LOG(DEBUG, rxq,
+ "Budget reached nb_pkts=%u received=%u\n",
+ rx_pkt, nb_pkts);
+ break;
+ }
+ }
+
+ qede_update_rx_prod(qdev, rxq);
+
+ PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
+
+ return rx_pkt;
+}
+
+static inline int
+qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
+{
+ uint16_t idx = TX_CONS(txq);
+ struct eth_tx_bd *tx_data_bd;
+ struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
+
+ if (unlikely(!mbuf)) {
+ PMD_TX_LOG(ERR, txq,
+ "null mbuf nb_tx_desc %u nb_tx_avail %u "
+ "sw_tx_cons %u sw_tx_prod %u\n",
+ txq->nb_tx_desc, txq->nb_tx_avail, idx,
+ TX_PROD(txq));
+ return -1;
+ }
+
+ /* Free now */
+ rte_pktmbuf_free_seg(mbuf);
+ txq->sw_tx_ring[idx].mbuf = NULL;
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+
+ return 0;
+}
+
+static inline uint16_t
+qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
+{
+ uint16_t tx_compl = 0;
+ uint16_t hw_bd_cons;
+ int rc;
+
+ hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+ rte_compiler_barrier();
+
+ while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
+ rc = qede_free_tx_pkt(edev, txq);
+ if (rc) {
+ DP_NOTICE(edev, false,
+ "hw_bd_cons = %d, chain_cons=%d\n",
+ hw_bd_cons,
+ ecore_chain_get_cons_idx(&txq->tx_pbl));
+ break;
+ }
+ txq->sw_tx_cons++; /* Making TXD available */
+ tx_compl++;
+ }
+
+ PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
+ tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
+ return tx_compl;
+}
+
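+/*
+ * Burst Tx handler, the rte_eth_tx_burst() counterpart; capacity is
+ * budgeted at MAX_NUM_TX_BDS BDs per packet before the send loop below.
+ */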
+uint16_t
+qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct qede_tx_queue *txq = p_txq;
+ struct qede_dev *qdev = txq->qdev;
+ struct ecore_dev *edev = &qdev->edev;
+ struct eth_tx_1st_bd *first_bd;
+ uint16_t nb_tx_pkts;
+ uint16_t nb_pkt_sent = 0;
+ uint16_t bd_prod;
+ uint16_t idx;
+ uint16_t tx_count;
+
+ if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
+ PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
+ nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
+ (void)qede_process_tx_compl(edev, txq);
+ }
+
+ nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS));
+ if (unlikely(nb_tx_pkts == 0)) {
+ PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
+ nb_pkts, txq->nb_tx_avail);
+ return 0;
+ }
+
+ tx_count = nb_tx_pkts;
+ while (nb_tx_pkts--) {
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = TX_PROD(txq);
+ struct rte_mbuf *mbuf = *tx_pkts++;
+ txq->sw_tx_ring[idx].mbuf = mbuf;
+ first_bd = (struct eth_tx_1st_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ first_bd->data.bd_flags.bitfields =
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ /* Map MBUF linear data for DMA and set in the first BD */
+ QEDE_BD_SET_ADDR_LEN(first_bd, rte_mbuf_data_dma_addr(mbuf),
+ mbuf->data_len);
+
+ /* Descriptor based VLAN insertion */
+ if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ first_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ /* Offload the IP checksum in the hardware */
+ if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ }
+
+ /* L4 checksum offload (tcp or udp) */
+ if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ /* IPv6 + extn. -> later */
+ }
+ first_bd->data.nbds = MAX_NUM_TX_BDS;
+ txq->sw_tx_prod++;
+ rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
+ txq->nb_tx_avail--;
+ bd_prod =
+ rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+ nb_pkt_sent++;
+ }
+
+ /* Write value of prod idx into bd_prod */
+ txq->tx_db.data.bd_prod = bd_prod;
+ rte_wmb();
+ rte_compiler_barrier();
+ DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
+ rte_wmb();
+
+ /* Check again for Tx completions */
+ (void)qede_process_tx_compl(edev, txq);
+
+ PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
+ nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
+
+ return nb_pkt_sent;
+}
+
+int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output link_output;
+ int rc;
+
+ DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
+
+ if (qdev->state == QEDE_START) {
+ DP_INFO(edev, "device already started\n");
+ return 0;
+ }
+
+ if (qdev->state == QEDE_CLOSE) {
+		rc = qede_alloc_fp_array(qdev);
+		if (rc)
+			return rc;
+		qede_init_fp(qdev);
+		rc = qede_alloc_mem_load(qdev);
+		if (rc)
+			return rc;
+		DP_INFO(edev, "Allocated %d RSS queues on %d TCs\n",
+			QEDE_RSS_CNT(qdev), qdev->num_tc);
+ } else if (qdev->state == QEDE_STOP) {
+ DP_INFO(edev, "restarting port %u\n", eth_dev->data->port_id);
+ } else {
+ DP_INFO(edev, "unknown state port %u\n",
+ eth_dev->data->port_id);
+ return -EINVAL;
+ }
+
+ rc = qede_start_queues(eth_dev, true);
+
+ if (rc) {
+ DP_ERR(edev, "Failed to start queues\n");
+ /* TBD: free */
+ return rc;
+ }
+
+ DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+ qede_dev_set_link_state(eth_dev, true);
+
+	/* Query whether link is already up */
+ memset(&link_output, 0, sizeof(link_output));
+ qdev->ops->common->get_link(edev, &link_output);
+ DP_NOTICE(edev, false, "link status: %s\n",
+ link_output.link_up ? "up" : "down");
+
+ qdev->state = QEDE_START;
+
+ qede_config_rx_mode(eth_dev);
+
+ DP_INFO(edev, "dev_state is QEDE_START\n");
+
+ return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *qdev,
+ struct qede_tx_queue *txq, bool allow_drain)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ int rc, cnt = 1000;
+
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ qede_process_tx_compl(edev, txq);
+ if (!cnt) {
+ if (allow_drain) {
+ DP_NOTICE(edev, false,
+					  "Tx queue[%u] is stuck, "
+ "requesting MCP to drain\n",
+ txq->queue_id);
+ rc = qdev->ops->common->drain(edev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(qdev, txq, false);
+ }
+
+ DP_NOTICE(edev, false,
+				  "Timeout waiting for tx queue[%d]: "
+ "PROD=%d, CONS=%d\n",
+ txq->queue_id, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -ENODEV;
+ }
+ cnt--;
+ DELAY(1000);
+ rte_compiler_barrier();
+ }
+
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ DELAY(2000);
+
+ return 0;
+}
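
qede_drain_txq() is a bounded poll: roughly 1000 iterations with a 1 ms DELAY() (about one second) before it either asks the MCP to drain (first pass) or fails with -ENODEV; the recursive call with allow_drain=false guarantees at most one drain request. The same pattern as a generic helper, sketched with a hypothetical predicate callback:

    #include <stdbool.h>
    #include <errno.h>
    #include <rte_cycles.h>

    /* Poll cond() up to iters times, sleeping delay_us between attempts.
     * cond and arg are hypothetical stand-ins for "TX ring is empty". */
    static int poll_until(bool (*cond)(void *), void *arg,
                          unsigned int iters, unsigned int delay_us)
    {
            while (iters--) {
                    if (cond(arg))
                            return 0;
                    rte_delay_us(delay_us);
            }
            return -ETIMEDOUT;      /* caller escalates (drain) or aborts */
    }
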
+
+static int qede_stop_queues(struct qede_dev *qdev)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct ecore_dev *edev = &qdev->edev;
+ int rc, tc, i;
+
+ /* Disable the vport */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = 0;
+ vport_update_params.update_vport_active_flg = 1;
+ vport_update_params.vport_active_flg = 0;
+ vport_update_params.update_rss_flg = 0;
+
+ DP_INFO(edev, "vport_update\n");
+
+ rc = qdev->ops->vport_update(edev, &vport_update_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return rc;
+ }
+
+ DP_INFO(edev, "Flushing tx queues\n");
+
+ /* Flush Tx queues. If needed, request drain from MCP */
+ for_each_rss(i) {
+ struct qede_fastpath *fp = &qdev->fp_array[i];
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct qede_tx_queue *txq = fp->txqs[tc];
+ rc = qede_drain_txq(qdev, txq, true);
+ if (rc)
+ return rc;
+ }
+ }
+
+ /* Stop all Queues in reverse order */
+ for (i = QEDE_RSS_CNT(qdev) - 1; i >= 0; i--) {
+ struct qed_stop_rxq_params rx_params;
+
+ /* Stop the Tx Queue(s) */
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+
+			memset(&tx_params, 0, sizeof(tx_params));
+			tx_params.rss_id = i;
+			tx_params.tx_queue_id = tc * QEDE_RSS_CNT(qdev) + i;
+
+			DP_INFO(edev, "Stopping tx queue %d\n",
+				tx_params.tx_queue_id);
+ rc = qdev->ops->q_tx_stop(edev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
+ }
+
+ /* Stop the Rx Queue */
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = i;
+ rx_params.eq_completion_only = 1;
+
+		DP_INFO(edev, "Stopping rx queue %d\n", i);
+
+ rc = qdev->ops->q_rx_stop(edev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
+ }
+
+ DP_INFO(edev, "Stopping vports\n");
+
+ /* Stop the vport */
+ rc = qdev->ops->vport_stop(edev, 0);
+ if (rc)
+ DP_ERR(edev, "Failed to stop VPORT\n");
+
+ return rc;
+}
+
+void qede_reset_fp_rings(struct qede_dev *qdev)
+{
+ uint16_t rss_id;
+ uint8_t tc;
+
+ for_each_rss(rss_id) {
+ DP_INFO(&qdev->edev, "reset fp chain for rss %u\n", rss_id);
+ struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+ ecore_chain_reset(&fp->rxq->rx_bd_ring);
+ ecore_chain_reset(&fp->rxq->rx_comp_ring);
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct qede_tx_queue *txq = fp->txqs[tc];
+ ecore_chain_reset(&txq->tx_pbl);
+ }
+ }
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
+{
+ uint8_t tc;
+
+ qede_rx_queue_release(fp->rxq);
+ for (tc = 0; tc < qdev->num_tc; tc++)
+ qede_tx_queue_release(fp->txqs[tc]);
+}
+
+void qede_free_mem_load(struct qede_dev *qdev)
+{
+ uint8_t rss_id;
+
+ for_each_rss(rss_id) {
+ struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+ qede_free_mem_fp(qdev, fp);
+ }
+ /* qdev->num_rss = 0; */
+}
+
+/*
+ * Stop an Ethernet device. The device can be restarted with a call to
+ * rte_eth_dev_start(). The link state is left unchanged and no SW
+ * structures are released.
+ */
+void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ int rc;
+
+ DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
+
+ if (qdev->state != QEDE_START) {
+ DP_INFO(edev, "device not yet started\n");
+ return;
+ }
+
+ rc = qede_stop_queues(qdev);
+
+ if (rc)
+		DP_ERR(edev, "Failed to stop queues\n");
+
+ DP_INFO(edev, "Stopped queues\n");
+
+ qdev->ops->fastpath_stop(edev);
+
+ qede_reset_fp_rings(qdev);
+
+ qdev->state = QEDE_STOP;
+
+ DP_INFO(edev, "dev_state is QEDE_STOP\n");
+}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
new file mode 100644
index 00000000..34eaf8b6
--- /dev/null
+++ b/drivers/net/qede/qede_rxtx.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+
+#ifndef _QEDE_RXTX_H_
+#define _QEDE_RXTX_H_
+
+#include "qede_ethdev.h"
+
+/* Ring Descriptors */
+#define RX_RING_SIZE_POW 16 /* 64K */
+#define RX_RING_SIZE (1ULL << RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS(q) (q->nb_rx_desc - 1)
+
+#define TX_RING_SIZE_POW 16 /* 64K */
+#define TX_RING_SIZE (1ULL << TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define NUM_TX_BDS(q) (q->nb_tx_desc - 1)
+
+#define TX_CONS(txq) (txq->sw_tx_cons & NUM_TX_BDS(txq))
+#define TX_PROD(txq) (txq->sw_tx_prod & NUM_TX_BDS(txq))
+
+/* Number of TX BDs per packet used currently */
+#define MAX_NUM_TX_BDS 1
+
+#define QEDE_DEFAULT_TX_FREE_THRESH 32
+
+#define QEDE_CSUM_ERROR (1 << 0)
+#define QEDE_CSUM_UNNECESSARY (1 << 1)
+#define QEDE_TUNN_CSUM_UNNECESSARY (1 << 2)
+
+#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
+ do { \
+ (bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
+ (bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
+ (bd)->nbytes = rte_cpu_to_le_16(len); \
+ } while (0)
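
QEDE_BD_SET_ADDR_LEN() splits a 64-bit IOVA into the two little-endian 32-bit halves the firmware expects. A worked example, assuming the conventional U64_HI()/U64_LO() definitions (upper and lower 32 bits respectively):

    #include <stdint.h>

    /* For maddr = 0x0000001234567000 and len = 1500 the macro stores:
     *   bd->addr.hi = rte_cpu_to_le_32(0x00000012);   -- U64_HI(maddr)
     *   bd->addr.lo = rte_cpu_to_le_32(0x34567000);   -- U64_LO(maddr)
     *   bd->nbytes  = rte_cpu_to_le_16(1500);
     */
    static void split_iova(uint64_t maddr, uint32_t *hi, uint32_t *lo)
    {
            *hi = (uint32_t)(maddr >> 32);
            *lo = (uint32_t)maddr;
    }
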
+
+#define CQE_HAS_VLAN(flags) \
+ ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
+ << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
+
+#define CQE_HAS_OUTER_VLAN(flags) \
+ ((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
+ << PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+
+/* Max supported alignment is 256 (shift of 8);
+ * a minimum alignment shift of 6 is optimal for 57xxx HW performance
+ */
+#define QEDE_L1_CACHE_SHIFT 6
+#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
+#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
+
+#define QEDE_ETH_OVERHEAD (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
+
+/* TBD: Excluding IPV6 */
+#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP)
+
+#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
+
+#define MAX_NUM_TC 8
+
+#define for_each_rss(i) for (i = 0; i < qdev->num_rss; i++)
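
TX_PROD()/TX_CONS() above mask free-running 16-bit indices with NUM_TX_BDS(), i.e. nb_tx_desc - 1; this only yields a valid slot when the ring size is a power of two, in which case the index may wrap naturally. A standalone sketch of the masking trick:

    #include <stdint.h>
    #include <assert.h>

    /* Map a free-running software index onto a ring slot. Requires a
     * power-of-two ring size so that (size - 1) is an all-ones mask. */
    static inline uint16_t ring_slot(uint16_t sw_index, uint16_t ring_size)
    {
            assert((ring_size & (ring_size - 1)) == 0);
            return sw_index & (ring_size - 1);
    }

    /* ring_slot(513, 512) == 1: wraparound needs no explicit modulo. */
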
+
+/*
+ * RX BD descriptor ring
+ */
+struct qede_rx_entry {
+ struct rte_mbuf *mbuf;
+ uint32_t page_offset;
+	/* allows for future expansion */
+};
+
+/*
+ * Structure associated with each RX queue.
+ */
+struct qede_rx_queue {
+ struct rte_mempool *mb_pool;
+ struct ecore_chain rx_bd_ring;
+ struct ecore_chain rx_comp_ring;
+ uint16_t *hw_cons_ptr;
+ void OSAL_IOMEM *hw_rxq_prod_addr;
+ struct qede_rx_entry *sw_rx_ring;
+ uint16_t sw_rx_cons;
+ uint16_t sw_rx_prod;
+ uint16_t nb_rx_desc;
+ uint16_t queue_id;
+ uint16_t port_id;
+ uint16_t rx_buf_size;
+ uint64_t rx_hw_errors;
+ uint64_t rx_alloc_errors;
+ struct qede_dev *qdev;
+};
+
+/*
+ * TX BD descriptor ring
+ */
+struct qede_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint8_t flags;
+};
+
+union db_prod {
+ struct eth_db_data data;
+ uint32_t raw;
+};
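
union db_prod lets the driver compose the doorbell record field by field through struct eth_db_data and then emit it as a single raw 32-bit store (txq->tx_db.raw in qede_xmit_pkts()). A sketch of the idiom with an illustrative field layout (the real layout is defined by struct eth_db_data):

    #include <stdint.h>

    union doorbell {
            struct {
                    uint16_t prod;          /* producer index */
                    uint16_t params;        /* illustrative only */
            } data;
            uint32_t raw;                   /* written as one MMIO store */
    };

    static uint32_t doorbell_value(uint16_t prod)
    {
            union doorbell db = { .data = { .prod = prod } };
            return db.raw;  /* all fields travel in a single write */
    }
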
+
+struct qede_tx_queue {
+ struct ecore_chain tx_pbl;
+ struct qede_tx_entry *sw_tx_ring;
+ uint16_t nb_tx_desc;
+ uint16_t nb_tx_avail;
+ uint16_t tx_free_thresh;
+ uint16_t queue_id;
+ uint16_t *hw_cons_ptr;
+ uint16_t sw_tx_cons;
+ uint16_t sw_tx_prod;
+ void OSAL_IOMEM *doorbell_addr;
+ volatile union db_prod tx_db;
+ uint16_t port_id;
+ uint64_t txq_counter;
+ struct qede_dev *qdev;
+};
+
+struct qede_fastpath {
+ struct qede_dev *qdev;
+ uint8_t rss_id;
+ struct ecore_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txqs[MAX_NUM_TC];
+ char name[80];
+};
+
+/*
+ * RX/TX function prototypes
+ */
+int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+
+int qede_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+void qede_rx_queue_release(void *rx_queue);
+
+void qede_tx_queue_release(void *tx_queue);
+
+int qede_dev_start(struct rte_eth_dev *eth_dev);
+
+void qede_dev_stop(struct rte_eth_dev *eth_dev);
+
+void qede_reset_fp_rings(struct qede_dev *qdev);
+
+void qede_free_fp_arrays(struct qede_dev *qdev);
+
+void qede_free_mem_load(struct qede_dev *qdev);
+
+uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _QEDE_RXTX_H_ */
diff --git a/drivers/net/qede/rte_pmd_qede_version.map b/drivers/net/qede/rte_pmd_qede_version.map
new file mode 100644
index 00000000..349c6e1c
--- /dev/null
+++ b/drivers/net/qede/rte_pmd_qede_version.map
@@ -0,0 +1,4 @@
+DPDK_16.04 {
+
+ local: *;
+};
diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile
index 963a8d67..4a7b14c9 100644
--- a/drivers/net/szedata2/Makefile
+++ b/drivers/net/szedata2/Makefile
@@ -55,9 +55,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c
SYMLINK-y-include +=
# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_malloc
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 78c43b0c..985a8d60 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1481,7 +1481,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
return -EINVAL;
}
snprintf(rsc_filename, PATH_MAX,
- SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/resource%u",
+ "%s/" PCI_PRI_FMT "/resource%u", pci_get_sysfs_path(),
pci_addr->domain, pci_addr->bus,
pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
fd = open(rsc_filename, O_RDWR);
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
new file mode 100644
index 00000000..8ea6b454
--- /dev/null
+++ b/drivers/net/thunderx/Makefile
@@ -0,0 +1,70 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Cavium Networks. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_thunderx_nicvf.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lm
+
+EXPORT_MAP := rte_pmd_thunderx_nicvf_version.map
+
+LIBABIVER := 1
+
+OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_mbox.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_ethdev.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_nicvf_rxtx.o += -fno-prefetch-loop-arrays
+endif
+CFLAGS_nicvf_rxtx.o += -Ofast
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_mempool lib/librte_mbuf
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
new file mode 100644
index 00000000..001b0edd
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -0,0 +1,905 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <math.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "nicvf_plat.h"
+
+struct nicvf_reg_info {
+ uint32_t offset;
+ const char *name;
+};
+
+#define NICVF_REG_POLL_ITER_NR (10)
+#define NICVF_REG_POLL_DELAY_US (2000)
+#define NICVF_REG_INFO(reg) {reg, #reg}
+
+static const struct nicvf_reg_info nicvf_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_VF_CFG),
+ NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
+ NICVF_REG_INFO(NIC_VF_INT),
+ NICVF_REG_INFO(NIC_VF_INT_W1S),
+ NICVF_REG_INFO(NIC_VF_ENA_W1C),
+ NICVF_REG_INFO(NIC_VF_ENA_W1S),
+ NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
+ NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
+};
+
+static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
+ {NIC_VNIC_RSS_KEY_0_4 + 0, "NIC_VNIC_RSS_KEY_0"},
+ {NIC_VNIC_RSS_KEY_0_4 + 8, "NIC_VNIC_RSS_KEY_1"},
+ {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
+ {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
+ {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
+ {NIC_VNIC_TX_STAT_0_4 + 0, "NIC_VNIC_STAT_TX_OCTS"},
+ {NIC_VNIC_TX_STAT_0_4 + 8, "NIC_VNIC_STAT_TX_UCAST"},
+ {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
+ {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
+ {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
+ {NIC_VNIC_RX_STAT_0_13 + 0, "NIC_VNIC_STAT_RX_OCTS"},
+ {NIC_VNIC_RX_STAT_0_13 + 8, "NIC_VNIC_STAT_RX_UCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
+ {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
+ {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
+ {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
+ {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
+ {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
+ {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
+ {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
+};
+
+static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
+ NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
+};
+
+static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
+ NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
+ NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
+};
+
+static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
+ NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
+};
+
+static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
+ NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
+};
+
+int
+nicvf_base_init(struct nicvf *nic)
+{
+ nic->hwcap = 0;
+ if (nic->subsystem_device_id == 0)
+ return NICVF_ERR_BASE_INIT;
+
+ if (nicvf_hw_version(nic) == NICVF_PASS2)
+ nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;
+
+ return NICVF_OK;
+}
+
+/* Dump registers to stdout when data is NULL, otherwise copy them to data */
+int
+nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
+{
+ uint32_t i, q;
+ bool dump_stdout;
+
+ dump_stdout = data ? 0 : 1;
+
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%24s = 0x%" PRIx64 "\n",
+ nicvf_reg_tbl[i].name,
+ nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
+ else
+ *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);
+
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%24s = 0x%" PRIx64 "\n",
+ nicvf_multi_reg_tbl[i].name,
+ nicvf_reg_read(nic,
+ nicvf_multi_reg_tbl[i].offset));
+ else
+ *data++ = nicvf_reg_read(nic,
+ nicvf_multi_reg_tbl[i].offset);
+
+ for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
+ nicvf_qset_cq_reg_tbl[i].name, q,
+ nicvf_queue_reg_read(nic,
+ nicvf_qset_cq_reg_tbl[i].offset, q));
+ else
+ *data++ = nicvf_queue_reg_read(nic,
+ nicvf_qset_cq_reg_tbl[i].offset, q);
+
+ for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
+ nicvf_qset_rq_reg_tbl[i].name, q,
+ nicvf_queue_reg_read(nic,
+ nicvf_qset_rq_reg_tbl[i].offset, q));
+ else
+ *data++ = nicvf_queue_reg_read(nic,
+ nicvf_qset_rq_reg_tbl[i].offset, q);
+
+ for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
+ nicvf_qset_sq_reg_tbl[i].name, q,
+ nicvf_queue_reg_read(nic,
+ nicvf_qset_sq_reg_tbl[i].offset, q));
+ else
+ *data++ = nicvf_queue_reg_read(nic,
+ nicvf_qset_sq_reg_tbl[i].offset, q);
+
+ for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
+ for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
+ if (dump_stdout)
+ nicvf_log("%30s(%d) = 0x%" PRIx64 "\n",
+ nicvf_qset_rbdr_reg_tbl[i].name, q,
+ nicvf_queue_reg_read(nic,
+ nicvf_qset_rbdr_reg_tbl[i].offset, q));
+ else
+ *data++ = nicvf_queue_reg_read(nic,
+ nicvf_qset_rbdr_reg_tbl[i].offset, q);
+ return 0;
+}
+
+int
+nicvf_reg_get_count(void)
+{
+ int nr_regs;
+
+ nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
+ MAX_CMP_QUEUES_PER_QS;
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
+ MAX_RCV_QUEUES_PER_QS;
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
+ MAX_SND_QUEUES_PER_QS;
+ nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
+ MAX_RCV_BUF_DESC_RINGS_PER_QS;
+
+ return nr_regs;
+}
+
+static int
+nicvf_qset_config_internal(struct nicvf *nic, bool enable)
+{
+ int ret;
+ struct pf_qs_cfg pf_qs_cfg = {.value = 0};
+
+ pf_qs_cfg.ena = enable ? 1 : 0;
+ pf_qs_cfg.vnic = nic->vf_id;
+ ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
+ return ret ? NICVF_ERR_SET_QS : 0;
+}
+
+/* Requests PF to assign and enable Qset */
+int
+nicvf_qset_config(struct nicvf *nic)
+{
+ /* Enable Qset */
+ return nicvf_qset_config_internal(nic, true);
+}
+
+int
+nicvf_qset_reclaim(struct nicvf *nic)
+{
+ /* Disable Qset */
+ return nicvf_qset_config_internal(nic, false);
+}
+
+static int
+cmpfunc(const void *a, const void *b)
+{
+	const uint32_t x = *(const uint32_t *)a;
+	const uint32_t y = *(const uint32_t *)b;
+
+	/* three-way compare without unsigned-subtraction wraparound */
+	return (x > y) - (x < y);
+}
+
+static uint32_t
+nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
+{
+ uint32_t i;
+
+ qsort(list, entries, sizeof(uint32_t), cmpfunc);
+ for (i = 0; i < entries; i++)
+ if (val <= list[i])
+ break;
+ /* Not in the list */
+ if (i >= entries)
+ return 0;
+ else
+ return list[i];
+}
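
nicvf_roundup_list() sorts the candidate sizes and returns the first entry greater than or equal to val, or 0 when val exceeds the largest supported size. The nicvf_qsize_*_roundup() helpers below use it to snap a requested descriptor count to a hardware-supported queue size; a hypothetical caller would look like:

    #include <errno.h>
    #include <stdint.h>

    /* Hypothetical caller: snap a requested SQ depth to a legal size.
     * 3000 rounds up to SND_QUEUE_SZ_4K (4096); values above
     * SND_QUEUE_SZ_64K yield 0 and must be rejected. */
    static int pick_sq_depth(uint32_t requested, uint32_t *depth)
    {
            *depth = nicvf_qsize_sq_roundup(requested);
            return *depth ? 0 : -EINVAL;
    }
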
+
+static void
+nicvf_handle_qset_err_intr(struct nicvf *nic)
+{
+ uint16_t qidx;
+ uint64_t status;
+
+ nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
+ nicvf_reg_dump(nic, NULL);
+
+ for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
+ status = nicvf_queue_reg_read(
+ nic, NIC_QSET_CQ_0_7_STATUS, qidx);
+ if (!(status & NICVF_CQ_ERR_MASK))
+ continue;
+
+ if (status & NICVF_CQ_WR_FULL)
+ nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
+ if (status & NICVF_CQ_WR_DISABLE)
+ nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
+ if (status & NICVF_CQ_WR_FAULT)
+ nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
+ }
+
+ for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ status = nicvf_queue_reg_read(
+ nic, NIC_QSET_SQ_0_7_STATUS, qidx);
+ if (!(status & NICVF_SQ_ERR_MASK))
+ continue;
+
+ if (status & NICVF_SQ_ERR_STOPPED)
+ nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
+ if (status & NICVF_SQ_ERR_SEND)
+ nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
+ if (status & NICVF_SQ_ERR_DPE)
+ nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
+ }
+
+ for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
+ status = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS0, qidx);
+ status &= NICVF_RBDR_FIFO_STATE_MASK;
+ status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
+
+ if (status == RBDR_FIFO_STATE_FAIL)
+ nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
+ }
+
+ nicvf_disable_all_interrupts(nic);
+ abort();
+}
+
+/*
+ * Handle the "mbox" and "queue-set error" interrupts that the poll mode
+ * driver is interested in.
+ * This function is not re-entrant; the caller must provide proper
+ * serialization.
+ */
+int
+nicvf_reg_poll_interrupts(struct nicvf *nic)
+{
+ int msg = 0;
+ uint64_t intr;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ if (intr & NICVF_INTR_MBOX_MASK) {
+ nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
+ msg = nicvf_handle_mbx_intr(nic);
+ }
+ if (intr & NICVF_INTR_QS_ERR_MASK) {
+ nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
+ nicvf_handle_qset_err_intr(nic);
+ }
+ return msg;
+}
+
+static int
+nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
+ uint32_t bit_pos, uint32_t bits, uint64_t val)
+{
+ uint64_t bit_mask;
+ uint64_t reg_val;
+ int timeout = NICVF_REG_POLL_ITER_NR;
+
+ bit_mask = (1ULL << bits) - 1;
+ bit_mask = (bit_mask << bit_pos);
+
+ while (timeout) {
+ reg_val = nicvf_queue_reg_read(nic, offset, qidx);
+ if (((reg_val & bit_mask) >> bit_pos) == val)
+ return NICVF_OK;
+ nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
+ timeout--;
+ }
+ return NICVF_ERR_REG_POLL;
+}
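
nicvf_qset_poll_reg() waits for the bits-wide field at bit_pos of a queue register to read val, retrying NICVF_REG_POLL_ITER_NR (10) times with NICVF_REG_POLL_DELAY_US (2000 us) between reads, roughly 20 ms in total. The RBDR reclaim path below uses it to watch the two-bit FIFO state field at bits 63:62 of STATUS0:

    /* From nicvf_qset_rbdr_reclaim() below: wait for the 2-bit RBDR FIFO
     * state (bits 63:62 of NIC_QSET_RBDR_0_1_STATUS0) to reach 0x00. */
    if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
                            62, 2, 0x00))
            return NICVF_ERR_RBDR_DISABLE;
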
+
+int
+nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+ uint64_t status;
+ int timeout = NICVF_REG_POLL_ITER_NR;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+
+ /* Save head and tail pointers for freeing up buffers */
+ if (rbdr) {
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
+ rbdr->next_tail = rbdr->tail;
+ }
+
+ /* Reset RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
+ NICVF_RBDR_RESET);
+
+ /* Disable RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+ if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
+ 62, 2, 0x00))
+ return NICVF_ERR_RBDR_DISABLE;
+
+ while (1) {
+ status = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
+ if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
+ break;
+ nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
+ timeout--;
+ if (!timeout)
+ return NICVF_ERR_RBDR_PREFETCH;
+ }
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
+ NICVF_RBDR_RESET);
+ if (nicvf_qset_poll_reg(nic, qidx,
+ NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+ return NICVF_ERR_RBDR_RESET1;
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+ if (nicvf_qset_poll_reg(nic, qidx,
+ NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return NICVF_ERR_RBDR_RESET2;
+
+ return NICVF_OK;
+}
+
+static int
+nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
+{
+ int val;
+
+ val = ((uint32_t)log2(len) - len_shift);
+ assert(val >= NICVF_QSIZE_MIN_VAL);
+ assert(val <= NICVF_QSIZE_MAX_VAL);
+ return val;
+}
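
nicvf_qsize_regbit() encodes a queue length as log2(len) - len_shift, i.e. the number of doublings above the minimum supported size, pinned by the asserts to the hardware range [NICVF_QSIZE_MIN_VAL, NICVF_QSIZE_MAX_VAL] = [0, 6]. With SND_QSIZE_SHIFT = 10 a 16K send queue encodes as 4, and with RBDR_SIZE_SHIFT = 13 a 64K RBDR encodes as 3. Because len is always a power of two here, an integer-only equivalent (a sketch, not the driver's code) avoids the libm dependency:

    #include <stdint.h>

    /* Integer-only equivalent of the log2()-based encoding: count the
     * trailing zero bits of the power-of-two length.
     *   qsize_regbit_int(16384, 10) == 4
     *   qsize_regbit_int(65536, 13) == 3 */
    static int qsize_regbit_int(uint32_t len, uint32_t len_shift)
    {
            return __builtin_ctz(len) - (int)len_shift;
    }
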
+
+int
+nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
+{
+ int ret;
+ uint64_t head, tail;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+ struct rbdr_cfg rbdr_cfg = {.value = 0};
+
+ ret = nicvf_qset_rbdr_reclaim(nic, qidx);
+ if (ret)
+ return ret;
+
+ /* Set descriptor base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
+
+ /* Enable RBDR & set queue size */
+ rbdr_cfg.ena = 1;
+ rbdr_cfg.reset = 0;
+ rbdr_cfg.ldwb = 0;
+ rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
+ RBDR_SIZE_SHIFT);
+ rbdr_cfg.avg_con = 0;
+ rbdr_cfg.lines = rbdr->buffsz / 128;
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
+
+ /* Verify proper RBDR reset */
+ head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
+
+ if (head | tail)
+ return NICVF_ERR_RBDR_RESET;
+
+ return NICVF_OK;
+}
+
+uint32_t
+nicvf_qsize_rbdr_roundup(uint32_t val)
+{
+ uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
+ RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
+ RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
+ RBDR_QUEUE_SZ_512K};
+ return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
+int
+nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
+ rbdr_pool_get_handler handler,
+ void *opaque, uint32_t max_buffs)
+{
+ struct rbdr_entry_t *desc, *desc0;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+ uint32_t count;
+ nicvf_phys_addr_t phy;
+
+ assert(rbdr != NULL);
+ desc = rbdr->desc;
+ count = 0;
+	/* Don't fill beyond the max number of descriptors */
+ while (count < rbdr->qlen_mask) {
+ if (count >= max_buffs)
+ break;
+ desc0 = desc + count;
+ phy = handler(opaque);
+ if (phy) {
+ desc0->full_addr = phy;
+ count++;
+ } else {
+ break;
+ }
+ }
+ nicvf_smp_wmb();
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
+ rbdr->next_tail = rbdr->tail;
+ nicvf_smp_rmb();
+ return 0;
+}
+
+int
+nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
+{
+ return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+}
+
+int
+nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+ uint64_t head, tail;
+ struct sq_cfg sq_cfg;
+
+ sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+
+ /* Check if SQ is stopped */
+ if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
+ NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
+ return NICVF_ERR_SQ_DISABLE;
+
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+ head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+ if (head | tail)
+ return NICVF_ERR_SQ_RESET;
+
+ return 0;
+}
+
+int
+nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
+{
+ int ret;
+ struct sq_cfg sq_cfg = {.value = 0};
+
+ ret = nicvf_qset_sq_reclaim(nic, qidx);
+ if (ret)
+ return ret;
+
+ /* Send a mailbox msg to PF to config SQ */
+ if (nicvf_mbox_sq_config(nic, qidx))
+ return NICVF_ERR_SQ_PF_CFG;
+
+ /* Set queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
+
+ /* Enable send queue & set queue size */
+ sq_cfg.ena = 1;
+ sq_cfg.reset = 0;
+ sq_cfg.ldwb = 0;
+ sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
+ sq_cfg.tstmp_bgx_intf = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
+
+ /* Ring doorbell so that H/W restarts processing SQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+
+ return 0;
+}
+
+uint32_t
+nicvf_qsize_sq_roundup(uint32_t val)
+{
+ uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
+ SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
+ SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
+ SND_QUEUE_SZ_64K};
+ return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
+int
+nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+ /* Disable receive queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+ return nicvf_mbox_rq_sync(nic);
+}
+
+int
+nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
+{
+ struct pf_rq_cfg pf_rq_cfg = {.value = 0};
+ struct rq_cfg rq_cfg = {.value = 0};
+
+ if (nicvf_qset_rq_reclaim(nic, qidx))
+ return NICVF_ERR_RQ_CLAIM;
+
+ pf_rq_cfg.strip_pre_l2 = 0;
+ /* First cache line of RBDR data will be allocated into L2C */
+ pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
+ pf_rq_cfg.cq_qs = nic->vf_id;
+ pf_rq_cfg.cq_idx = qidx;
+ pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
+ pf_rq_cfg.rbdr_cont_idx = 0;
+ pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
+ pf_rq_cfg.rbdr_strt_idx = 0;
+
+ /* Send a mailbox msg to PF to config RQ */
+ if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
+ return NICVF_ERR_RQ_PF_CFG;
+
+ /* Select Rx backpressure */
+ if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
+ return NICVF_ERR_RQ_BP_CFG;
+
+ /* Send a mailbox msg to PF to config RQ drop */
+ if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
+ return NICVF_ERR_RQ_DROP_CFG;
+
+ /* Enable Receive queue */
+ rq_cfg.ena = 1;
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
+
+ return 0;
+}
+
+int
+nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+ uint64_t tail, head;
+
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
+ return NICVF_ERR_CQ_DISABLE;
+
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
+ head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
+ if (head | tail)
+ return NICVF_ERR_CQ_RESET;
+
+ /* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ return 0;
+}
+
+int
+nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
+{
+ int ret;
+ struct cq_cfg cq_cfg = {.value = 0};
+
+ ret = nicvf_qset_cq_reclaim(nic, qidx);
+ if (ret)
+ return ret;
+
+ /* Set completion queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
+
+ cq_cfg.ena = 1;
+ cq_cfg.reset = 0;
+ /* Writes of CQE will be allocated into L2C */
+ cq_cfg.caching = 1;
+ cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
+ cq_cfg.avg_con = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ return 0;
+}
+
+uint32_t
+nicvf_qsize_cq_roundup(uint32_t val)
+{
+ uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
+ CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
+ CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
+ CMP_QUEUE_SZ_64K};
+ return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
+}
+
+
+void
+nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
+{
+ uint64_t val;
+
+ val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+ if (enable)
+ val |= (STRIP_FIRST_VLAN << 25);
+ else
+ val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
+
+ nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
+nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
+{
+ int idx;
+ uint64_t addr, val;
+ uint64_t *keyptr = (uint64_t *)key;
+
+ addr = NIC_VNIC_RSS_KEY_0_4;
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ val = nicvf_cpu_to_be_64(*keyptr);
+ nicvf_reg_write(nic, addr, val);
+ addr += sizeof(uint64_t);
+ keyptr++;
+ }
+}
+
+void
+nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
+{
+ int idx;
+ uint64_t addr, val;
+ uint64_t *keyptr = (uint64_t *)key;
+
+ addr = NIC_VNIC_RSS_KEY_0_4;
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ val = nicvf_reg_read(nic, addr);
+ *keyptr = nicvf_be_to_cpu_64(val);
+ addr += sizeof(uint64_t);
+ keyptr++;
+ }
+}
+
+void
+nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
+{
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
+}
+
+uint64_t
+nicvf_rss_get_cfg(struct nicvf *nic)
+{
+ return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+}
+
+int
+nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
+{
+ uint32_t idx;
+ struct nicvf_rss_reta_info *rss = &nic->rss_info;
+
+ /* result will be stored in nic->rss_info.rss_size */
+ if (nicvf_mbox_get_rss_size(nic))
+ return NICVF_ERR_RSS_GET_SZ;
+
+ assert(rss->rss_size > 0);
+ rss->hash_bits = (uint8_t)log2(rss->rss_size);
+ for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
+ rss->ind_tbl[idx] = tbl[idx];
+
+ if (nicvf_mbox_config_rss(nic))
+ return NICVF_ERR_RSS_TBL_UPDATE;
+
+ return NICVF_OK;
+}
+
+int
+nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
+{
+ uint32_t idx;
+ struct nicvf_rss_reta_info *rss = &nic->rss_info;
+
+ /* result will be stored in nic->rss_info.rss_size */
+ if (nicvf_mbox_get_rss_size(nic))
+ return NICVF_ERR_RSS_GET_SZ;
+
+ assert(rss->rss_size > 0);
+ rss->hash_bits = (uint8_t)log2(rss->rss_size);
+ for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
+ tbl[idx] = rss->ind_tbl[idx];
+
+ return NICVF_OK;
+}
+
+int
+nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
+{
+ uint32_t idx;
+ uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+ uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
+ };
+
+ if (nic->cpi_alg != CPI_ALG_NONE)
+ return -EINVAL;
+
+ if (cfg == 0)
+ return -EINVAL;
+
+ /* Update default RSS key and cfg */
+ nicvf_rss_set_key(nic, default_key);
+ nicvf_rss_set_cfg(nic, cfg);
+
+ /* Update default RSS RETA */
+ for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+ default_reta[idx] = idx % qcnt;
+
+ return nicvf_rss_reta_update(nic, default_reta,
+ NIC_MAX_RSS_IDR_TBL_SIZE);
+}
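
nicvf_rss_config() programs a fixed 320-bit key and fills the 256-entry indirection table round-robin over the active queues, so with qcnt = 4 the RETA reads 0,1,2,3,0,1,2,3,...; nicvf_rss_term() below is the degenerate case that steers every entry to queue 0. A sketch of how such a table resolves a hash to a queue, assuming the hardware indexes it with the low hash_bits of the RSS hash (consistent with hash_bits = log2(rss_size) above):

    #include <stdint.h>

    /* Resolve an RSS hash to a destination queue via the RETA.
     * hash_bits == 8 for the full 256-entry table. */
    static uint8_t rss_queue(const uint8_t *reta, uint8_t hash_bits,
                             uint32_t rss_hash)
    {
            return reta[rss_hash & ((1u << hash_bits) - 1)];
    }
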
+
+int
+nicvf_rss_term(struct nicvf *nic)
+{
+ uint32_t idx;
+ uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+ nicvf_rss_set_cfg(nic, 0);
+ /* Redirect the output to 0th queue */
+ for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+ disable_rss[idx] = 0;
+
+ return nicvf_rss_reta_update(nic, disable_rss,
+ NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+int
+nicvf_loopback_config(struct nicvf *nic, bool enable)
+{
+ if (enable && nic->loopback_supported == 0)
+ return NICVF_ERR_LOOPBACK_CFG;
+
+ return nicvf_mbox_loopback_config(nic, enable);
+}
+
+void
+nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
+{
+ stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
+ stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
+ stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
+ stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
+ stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
+ stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
+ stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
+ stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
+ stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
+ stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);
+
+ stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
+ stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
+}
+
+void
+nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
+ uint16_t qidx)
+{
+ qstats->q_rx_bytes =
+ nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
+ qstats->q_rx_packets =
+ nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
+}
+
+void
+nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
+ uint16_t qidx)
+{
+ qstats->q_tx_bytes =
+ nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
+ qstats->q_tx_packets =
+ nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
+}
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
new file mode 100644
index 00000000..9db1d30c
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -0,0 +1,240 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _THUNDERX_NICVF_HW_H
+#define _THUNDERX_NICVF_HW_H
+
+#include <stdint.h>
+
+#include "nicvf_hw_defs.h"
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_THUNDERX_PASS1_NICVF 0x0011
+#define PCI_DEVICE_ID_THUNDERX_PASS2_NICVF 0xA034
+#define PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF 0xA11E
+#define PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF 0xA134
+
+#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define NICVF_GET_RX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define NICVF_GET_TX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
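
NICVF_GET_RX_STATS()/NICVF_GET_TX_STATS() turn a statistic index into a register address by shifting it left by 3 (one 64-bit counter every 8 bytes) and OR-ing it into the block base, matching the +0, +8, +16, ... offsets listed for the same counters in nicvf_multi_reg_tbl[] in nicvf_hw.c. For example, assuming RX_UCAST is index 1 (it follows RX_OCTS in that table):

    #include <stdint.h>

    /* Byte offset of the i-th 64-bit counter in a stats block.
     * stat_offset(NIC_VNIC_RX_STAT_0_13, 1) == NIC_VNIC_RX_STAT_0_13 + 8,
     * the RX_UCAST counter. */
    static inline uint32_t stat_offset(uint32_t base, uint32_t i)
    {
            return base | (i << 3);
    }
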
+
+#define NICVF_PASS1 (PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF)
+#define NICVF_PASS2 (PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF)
+
+#define NICVF_CAP_TUNNEL_PARSING (1ULL << 0)
+
+enum nicvf_tns_mode {
+ NIC_TNS_BYPASS_MODE,
+ NIC_TNS_MODE,
+};
+
+enum nicvf_err_e {
+ NICVF_OK,
+	NICVF_ERR_SET_QS = -8191, /* -8191 */
+ NICVF_ERR_RESET_QS, /* -8190 */
+ NICVF_ERR_REG_POLL, /* -8189 */
+ NICVF_ERR_RBDR_RESET, /* -8188 */
+ NICVF_ERR_RBDR_DISABLE, /* -8187 */
+ NICVF_ERR_RBDR_PREFETCH, /* -8186 */
+ NICVF_ERR_RBDR_RESET1, /* -8185 */
+ NICVF_ERR_RBDR_RESET2, /* -8184 */
+ NICVF_ERR_RQ_CLAIM, /* -8183 */
+ NICVF_ERR_RQ_PF_CFG, /* -8182 */
+ NICVF_ERR_RQ_BP_CFG, /* -8181 */
+ NICVF_ERR_RQ_DROP_CFG, /* -8180 */
+ NICVF_ERR_CQ_DISABLE, /* -8179 */
+ NICVF_ERR_CQ_RESET, /* -8178 */
+ NICVF_ERR_SQ_DISABLE, /* -8177 */
+ NICVF_ERR_SQ_RESET, /* -8176 */
+ NICVF_ERR_SQ_PF_CFG, /* -8175 */
+ NICVF_ERR_LOOPBACK_CFG, /* -8174 */
+ NICVF_ERR_BASE_INIT, /* -8173 */
+	NICVF_ERR_RSS_TBL_UPDATE, /* -8172 */
+ NICVF_ERR_RSS_GET_SZ, /* -8171 */
+};
+
+typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *opaque);
+
+struct nicvf_hw_rx_qstats {
+ uint64_t q_rx_bytes;
+ uint64_t q_rx_packets;
+};
+
+struct nicvf_hw_tx_qstats {
+ uint64_t q_tx_bytes;
+ uint64_t q_tx_packets;
+};
+
+struct nicvf_hw_stats {
+ uint64_t rx_bytes;
+ uint64_t rx_ucast_frames;
+ uint64_t rx_bcast_frames;
+ uint64_t rx_mcast_frames;
+ uint64_t rx_fcs_errors;
+ uint64_t rx_l2_errors;
+ uint64_t rx_drop_red;
+ uint64_t rx_drop_red_bytes;
+ uint64_t rx_drop_overrun;
+ uint64_t rx_drop_overrun_bytes;
+ uint64_t rx_drop_bcast;
+ uint64_t rx_drop_mcast;
+ uint64_t rx_drop_l3_bcast;
+ uint64_t rx_drop_l3_mcast;
+
+ uint64_t tx_bytes_ok;
+ uint64_t tx_ucast_frames_ok;
+ uint64_t tx_bcast_frames_ok;
+ uint64_t tx_mcast_frames_ok;
+ uint64_t tx_drops;
+};
+
+struct nicvf_rss_reta_info {
+ uint8_t hash_bits;
+ uint16_t rss_size;
+ uint8_t ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+};
+
+/* Common structs used in DPDK and base layer are defined in DPDK layer */
+#include "../nicvf_struct.h"
+
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_rbdr) <= 128);
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_txq) <= 128);
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_rxq) <= 128);
+
+static inline void
+nicvf_reg_write(struct nicvf *nic, uint32_t offset, uint64_t val)
+{
+ nicvf_addr_write(nic->reg_base + offset, val);
+}
+
+static inline uint64_t
+nicvf_reg_read(struct nicvf *nic, uint32_t offset)
+{
+ return nicvf_addr_read(nic->reg_base + offset);
+}
+
+static inline uintptr_t
+nicvf_qset_base(struct nicvf *nic, uint32_t qidx)
+{
+ return nic->reg_base + (qidx << NIC_Q_NUM_SHIFT);
+}
+
+static inline void
+nicvf_queue_reg_write(struct nicvf *nic, uint32_t offset, uint32_t qidx,
+ uint64_t val)
+{
+ nicvf_addr_write(nicvf_qset_base(nic, qidx) + offset, val);
+}
+
+static inline uint64_t
+nicvf_queue_reg_read(struct nicvf *nic, uint32_t offset, uint32_t qidx)
+{
+ return nicvf_addr_read(nicvf_qset_base(nic, qidx) + offset);
+}
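
nicvf_qset_base() places each queue's register copy in its own window: with NIC_Q_NUM_SHIFT = 18 the stride is 256 KB, and the per-queue helpers add the register offset within that window. A worked example of the address computation:

    #include <stdint.h>

    /* Address of CQ 2's CFG register, as computed by the helpers above:
     * reg_base + (2 << NIC_Q_NUM_SHIFT) + NIC_QSET_CQ_0_7_CFG.
     * Equivalent to nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, 2). */
    static uintptr_t cq2_cfg_addr(struct nicvf *nic)
    {
            return nicvf_qset_base(nic, 2) + NIC_QSET_CQ_0_7_CFG;
    }
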
+
+static inline void
+nicvf_disable_all_interrupts(struct nicvf *nic)
+{
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, NICVF_INTR_ALL_MASK);
+ nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_ALL_MASK);
+}
+
+static inline uint32_t
+nicvf_hw_version(struct nicvf *nic)
+{
+ return nic->subsystem_device_id;
+}
+
+static inline uint64_t
+nicvf_hw_cap(struct nicvf *nic)
+{
+ return nic->hwcap;
+}
+
+int nicvf_base_init(struct nicvf *nic);
+
+int nicvf_reg_get_count(void);
+int nicvf_reg_poll_interrupts(struct nicvf *nic);
+int nicvf_reg_dump(struct nicvf *nic, uint64_t *data);
+
+int nicvf_qset_config(struct nicvf *nic);
+int nicvf_qset_reclaim(struct nicvf *nic);
+
+int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx);
+int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx);
+int nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
+ rbdr_pool_get_handler handler, void *opaque,
+ uint32_t max_buffs);
+int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx,
+ struct nicvf_rxq *rxq);
+int nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx,
+ struct nicvf_rxq *rxq);
+int nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx,
+ struct nicvf_txq *txq);
+int nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+uint32_t nicvf_qsize_rbdr_roundup(uint32_t val);
+uint32_t nicvf_qsize_cq_roundup(uint32_t val);
+uint32_t nicvf_qsize_sq_roundup(uint32_t val);
+
+void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
+
+int nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg);
+int nicvf_rss_term(struct nicvf *nic);
+
+int nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count);
+int nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count);
+
+void nicvf_rss_set_key(struct nicvf *nic, uint8_t *key);
+void nicvf_rss_get_key(struct nicvf *nic, uint8_t *key);
+
+void nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val);
+uint64_t nicvf_rss_get_cfg(struct nicvf *nic);
+
+int nicvf_loopback_config(struct nicvf *nic, bool enable);
+
+void nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats);
+void nicvf_hw_get_rx_qstats(struct nicvf *nic,
+ struct nicvf_hw_rx_qstats *qstats, uint16_t qidx);
+void nicvf_hw_get_tx_qstats(struct nicvf *nic,
+ struct nicvf_hw_tx_qstats *qstats, uint16_t qidx);
+
+#endif /* _THUNDERX_NICVF_HW_H */
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
new file mode 100644
index 00000000..88ecd175
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -0,0 +1,1219 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _THUNDERX_NICVF_HW_DEFS_H
+#define _THUNDERX_NICVF_HW_DEFS_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* Virtual function register offsets */
+
+#define NIC_VF_CFG (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
+#define NIC_VF_INT (0x000200)
+#define NIC_VF_INT_W1S (0x000220)
+#define NIC_VF_ENA_W1C (0x000240)
+#define NIC_VF_ENA_W1S (0x000260)
+
+#define NIC_VNIC_RSS_CFG (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
+#define NIC_VNIC_TX_STAT_0_4 (0x004000)
+#define NIC_VNIC_RX_STAT_0_13 (0x004100)
+#define NIC_VNIC_RQ_GEN_CFG (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH (0x010410)
+#define NIC_QSET_CQ_0_7_BASE (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG (0x010600)
+#define NIC_QSET_RQ_0_7_STATUS0 (0x010700)
+#define NIC_QSET_RQ_0_7_STATUS1 (0x010708)
+
+#define NIC_QSET_SQ_0_7_CFG (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH (0x010810)
+#define NIC_QSET_SQ_0_7_BASE (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+#define NIC_QSET_SQ_0_7_STATUS0 (0x010900)
+#define NIC_QSET_SQ_0_7_STATUS1 (0x010908)
+
+#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
+#define NIC_QSET_RBDR_0_1_PRFCH_STATUS (0x010C50)
+
+/* vNIC HW Constants */
+
+#define NIC_Q_NUM_SHIFT 18
+
+#define MAX_QUEUE_SET 128
+#define MAX_RCV_QUEUES_PER_QS 8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
+#define MAX_SND_QUEUES_PER_QS 8
+#define MAX_CMP_QUEUES_PER_QS 8
+
+#define NICVF_INTR_CQ_SHIFT 0
+#define NICVF_INTR_SQ_SHIFT 8
+#define NICVF_INTR_RBDR_SHIFT 16
+#define NICVF_INTR_PKT_DROP_SHIFT 20
+#define NICVF_INTR_TCP_TIMER_SHIFT 21
+#define NICVF_INTR_MBOX_SHIFT 22
+#define NICVF_INTR_QS_ERR_SHIFT 23
+
+#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK (1 << NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK (1 << NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK (1 << NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK (1 << NICVF_INTR_QS_ERR_SHIFT)
+#define NICVF_INTR_ALL_MASK (0x7FFFFF)
+
+#define NICVF_CQ_WR_FULL (1ULL << 26)
+#define NICVF_CQ_WR_DISABLE (1ULL << 25)
+#define NICVF_CQ_WR_FAULT (1ULL << 24)
+#define NICVF_CQ_ERR_MASK (NICVF_CQ_WR_FULL |\
+ NICVF_CQ_WR_DISABLE |\
+ NICVF_CQ_WR_FAULT)
+#define NICVF_CQ_CQE_COUNT_MASK (0xFFFF)
+
+#define NICVF_SQ_ERR_STOPPED (1ULL << 21)
+#define NICVF_SQ_ERR_SEND (1ULL << 20)
+#define NICVF_SQ_ERR_DPE (1ULL << 19)
+#define NICVF_SQ_ERR_MASK (NICVF_SQ_ERR_STOPPED |\
+ NICVF_SQ_ERR_SEND |\
+ NICVF_SQ_ERR_DPE)
+#define NICVF_SQ_STATUS_STOPPED_BIT (21)
+
+#define NICVF_RBDR_FIFO_STATE_SHIFT (62)
+#define NICVF_RBDR_FIFO_STATE_MASK (3ULL << NICVF_RBDR_FIFO_STATE_SHIFT)
+#define NICVF_RBDR_COUNT_MASK (0x7FFFF)
+
+/* Queue reset */
+#define NICVF_CQ_RESET (1ULL << 41)
+#define NICVF_SQ_RESET (1ULL << 17)
+#define NICVF_RBDR_RESET (1ULL << 43)
+
+/* RSS constants */
+#define NIC_MAX_RSS_HASH_BITS (8)
+#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE (5) /* 320 bit key */
+#define RSS_HASH_KEY_BYTE_SIZE (40) /* 320 bit key */
+
+#define RSS_L2_EXTENDED_HASH_ENA (1 << 0)
+#define RSS_IP_ENA (1 << 1)
+#define RSS_TCP_ENA (1 << 2)
+#define RSS_TCP_SYN_ENA (1 << 3)
+#define RSS_UDP_ENA (1 << 4)
+#define RSS_L4_EXTENDED_ENA (1 << 5)
+#define RSS_L3_BI_DIRECTION_ENA (1 << 7)
+#define RSS_L4_BI_DIRECTION_ENA (1 << 8)
+#define RSS_TUN_VXLAN_ENA (1 << 9)
+#define RSS_TUN_GENEVE_ENA (1 << 10)
+#define RSS_TUN_NVGRE_ENA (1 << 11)
+
+#define RBDR_QUEUE_SZ_8K (8 * 1024)
+#define RBDR_QUEUE_SZ_16K (16 * 1024)
+#define RBDR_QUEUE_SZ_32K (32 * 1024)
+#define RBDR_QUEUE_SZ_64K (64 * 1024)
+#define RBDR_QUEUE_SZ_128K (128 * 1024)
+#define RBDR_QUEUE_SZ_256K (256 * 1024)
+#define RBDR_QUEUE_SZ_512K (512 * 1024)
+
+#define RBDR_SIZE_SHIFT (13) /* 8k */
+
+#define SND_QUEUE_SZ_1K (1 * 1024)
+#define SND_QUEUE_SZ_2K (2 * 1024)
+#define SND_QUEUE_SZ_4K (4 * 1024)
+#define SND_QUEUE_SZ_8K (8 * 1024)
+#define SND_QUEUE_SZ_16K (16 * 1024)
+#define SND_QUEUE_SZ_32K (32 * 1024)
+#define SND_QUEUE_SZ_64K (64 * 1024)
+
+#define SND_QSIZE_SHIFT (10) /* 1k */
+
+#define CMP_QUEUE_SZ_1K (1 * 1024)
+#define CMP_QUEUE_SZ_2K (2 * 1024)
+#define CMP_QUEUE_SZ_4K (4 * 1024)
+#define CMP_QUEUE_SZ_8K (8 * 1024)
+#define CMP_QUEUE_SZ_16K (16 * 1024)
+#define CMP_QUEUE_SZ_32K (32 * 1024)
+#define CMP_QUEUE_SZ_64K (64 * 1024)
+
+#define CMP_QSIZE_SHIFT (10) /* 1k */
+
+#define NICVF_QSIZE_MIN_VAL (0)
+#define NICVF_QSIZE_MAX_VAL (6)
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS (64)
+#define NIC_HW_MAX_FRS (9200) /* 9216 max pkt including FCS */
+#define NIC_HW_MAX_SEGS (12)
+
+/* Descriptor alignments */
+#define NICVF_RBDR_BASE_ALIGN_BYTES (128) /* 7 bits */
+#define NICVF_CQ_BASE_ALIGN_BYTES (512) /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES (128) /* 7 bits */
+
+#define NICVF_CQE_RBPTR_WORD (6)
+#define NICVF_CQE_RX2_RBPTR_WORD (7)
+
+#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
+
+typedef uint64_t nicvf_phys_addr_t;
+
+#ifndef __BYTE_ORDER__
+#error __BYTE_ORDER__ not defined
+#endif
+
+/* vNIC HW Enumerations */
+
+enum nic_send_ld_type_e {
+ NIC_SEND_LD_TYPE_E_LDD,
+ NIC_SEND_LD_TYPE_E_LDT,
+ NIC_SEND_LD_TYPE_E_LDWB,
+ NIC_SEND_LD_TYPE_E_ENUM_LAST,
+};
+
+enum ether_type_algorithm {
+ ETYPE_ALG_NONE,
+ ETYPE_ALG_SKIP,
+ ETYPE_ALG_ENDPARSE,
+ ETYPE_ALG_VLAN,
+ ETYPE_ALG_VLAN_STRIP,
+};
+
+enum layer3_type {
+ L3TYPE_NONE,
+ L3TYPE_GRH,
+ L3TYPE_IPV4 = 0x4,
+ L3TYPE_IPV4_OPTIONS = 0x5,
+ L3TYPE_IPV6 = 0x6,
+ L3TYPE_IPV6_OPTIONS = 0x7,
+ L3TYPE_ET_STOP = 0xD,
+ L3TYPE_OTHER = 0xE,
+};
+
+#define NICVF_L3TYPE_OPTIONS_MASK ((uint8_t)1)
+#define NICVF_L3TYPE_IPVX_MASK ((uint8_t)0x06)
+
+enum layer4_type {
+ L4TYPE_NONE,
+ L4TYPE_IPSEC_ESP,
+ L4TYPE_IPFRAG,
+ L4TYPE_IPCOMP,
+ L4TYPE_TCP,
+ L4TYPE_UDP,
+ L4TYPE_SCTP,
+ L4TYPE_GRE,
+ L4TYPE_ROCE_BTH,
+ L4TYPE_OTHER = 0xE,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+ CPI_ALG_NONE,
+ CPI_ALG_VLAN,
+ CPI_ALG_VLAN16,
+ CPI_ALG_DIFF,
+};
+
+enum rss_algorithm_type {
+ RSS_ALG_NONE,
+ RSS_ALG_PORT,
+ RSS_ALG_IP,
+ RSS_ALG_TCP_IP,
+ RSS_ALG_UDP_IP,
+ RSS_ALG_SCTP_IP,
+ RSS_ALG_GRE_IP,
+ RSS_ALG_ROCE,
+};
+
+enum rss_hash_cfg {
+ RSS_HASH_L2ETC,
+ RSS_HASH_IP,
+ RSS_HASH_TCP,
+ RSS_HASH_TCP_SYN_DIS,
+ RSS_HASH_UDP,
+ RSS_HASH_L4ETC,
+ RSS_HASH_ROCE,
+ RSS_L3_BIDI,
+ RSS_L4_BIDI,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+ CQE_TYPE_INVALID,
+ CQE_TYPE_RX = 0x2,
+ CQE_TYPE_RX_SPLIT = 0x3,
+ CQE_TYPE_RX_TCP = 0x4,
+ CQE_TYPE_SEND = 0x8,
+ CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+ CQE_RX_STATUS_VALID_TCP_CNXT,
+ CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+ CQE_SEND_STATUS_GOOD,
+ CQE_SEND_STATUS_DESC_FAULT = 0x01,
+ CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+ CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+ CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+ CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+ CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+ CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+ CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+ CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+ CQE_SEND_STATUS_DATA_FAULT = 0x86,
+ CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+ CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+ CQE_SEND_STATUS_MEM_FAULT = 0x89,
+ CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+ CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+ CQE_RX_TCP_END_FIN_FLAG_DET,
+ CQE_RX_TCP_END_INVALID_FLAG,
+ CQE_RX_TCP_END_TIMEOUT,
+ CQE_RX_TCP_END_OUT_OF_SEQ,
+ CQE_RX_TCP_END_PKT_ERR,
+ CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+ CQE_RX_ERRLVL_RE,
+ CQE_RX_ERRLVL_L2,
+ CQE_RX_ERRLVL_L3,
+ CQE_RX_ERRLVL_L4,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+ CQE_RX_ERR_RE_NONE,
+ CQE_RX_ERR_RE_PARTIAL,
+ CQE_RX_ERR_RE_JABBER,
+ CQE_RX_ERR_RE_FCS = 0x7,
+ CQE_RX_ERR_RE_TERMINATE = 0x9,
+ CQE_RX_ERR_RE_RX_CTL = 0xb,
+ CQE_RX_ERR_PREL2_ERR = 0x1f,
+ CQE_RX_ERR_L2_FRAGMENT = 0x20,
+ CQE_RX_ERR_L2_OVERRUN = 0x21,
+ CQE_RX_ERR_L2_PFCS = 0x22,
+ CQE_RX_ERR_L2_PUNY = 0x23,
+ CQE_RX_ERR_L2_MAL = 0x24,
+ CQE_RX_ERR_L2_OVERSIZE = 0x25,
+ CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+ CQE_RX_ERR_L2_LENMISM = 0x27,
+ CQE_RX_ERR_L2_PCLP = 0x28,
+ CQE_RX_ERR_IP_NOT = 0x41,
+ CQE_RX_ERR_IP_CHK = 0x42,
+ CQE_RX_ERR_IP_MAL = 0x43,
+ CQE_RX_ERR_IP_MALD = 0x44,
+ CQE_RX_ERR_IP_HOP = 0x45,
+ CQE_RX_ERR_L3_ICRC = 0x46,
+ CQE_RX_ERR_L3_PCLP = 0x47,
+ CQE_RX_ERR_L4_MAL = 0x61,
+ CQE_RX_ERR_L4_CHK = 0x62,
+ CQE_RX_ERR_UDP_LEN = 0x63,
+ CQE_RX_ERR_L4_PORT = 0x64,
+ CQE_RX_ERR_TCP_FLAG = 0x65,
+ CQE_RX_ERR_TCP_OFFSET = 0x66,
+ CQE_RX_ERR_L4_PCLP = 0x67,
+ CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+enum send_l4_csum_type {
+ SEND_L4_CSUM_DISABLE,
+ SEND_L4_CSUM_UDP,
+ SEND_L4_CSUM_TCP,
+};
+
+enum send_crc_alg {
+ SEND_CRCALG_CRC32,
+ SEND_CRCALG_CRC32C,
+ SEND_CRCALG_ICRC,
+};
+
+enum send_load_type {
+ SEND_LD_TYPE_LDD,
+ SEND_LD_TYPE_LDT,
+ SEND_LD_TYPE_LDWB,
+};
+
+enum send_mem_alg_type {
+ SEND_MEMALG_SET,
+ SEND_MEMALG_ADD = 0x08,
+ SEND_MEMALG_SUB = 0x09,
+ SEND_MEMALG_ADDLEN = 0x0A,
+ SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+ SEND_MEMDSZ_B64,
+ SEND_MEMDSZ_B32,
+ SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+ SQ_DESC_TYPE_INVALID,
+ SQ_DESC_TYPE_HEADER,
+ SQ_DESC_TYPE_CRC,
+ SQ_DESC_TYPE_IMMEDIATE,
+ SQ_DESC_TYPE_GATHER,
+ SQ_DESC_TYPE_MEMORY,
+};
+
+enum l3_type_t {
+ L3_NONE,
+ L3_IPV4 = 0x04,
+ L3_IPV4_OPT = 0x05,
+ L3_IPV6 = 0x06,
+ L3_IPV6_OPT = 0x07,
+ L3_ET_STOP = 0x0D,
+ L3_OTHER = 0x0E
+};
+
+enum l4_type_t {
+ L4_NONE,
+ L4_IPSEC_ESP = 0x01,
+ L4_IPFRAG = 0x02,
+ L4_IPCOMP = 0x03,
+ L4_TCP = 0x04,
+ L4_UDP_PASS1 = 0x05,
+ L4_GRE = 0x07,
+ L4_UDP_PASS2 = 0x08,
+ L4_UDP_GENEVE = 0x09,
+ L4_UDP_VXLAN = 0x0A,
+ L4_NVGRE = 0x0C,
+ L4_OTHER = 0x0E
+};
+
+enum vlan_strip {
+ NO_STRIP,
+ STRIP_FIRST_VLAN,
+ STRIP_SECOND_VLAN,
+ STRIP_RESERV,
+};
+
+enum rbdr_state {
+ RBDR_FIFO_STATE_INACTIVE,
+ RBDR_FIFO_STATE_ACTIVE,
+ RBDR_FIFO_STATE_RESET,
+ RBDR_FIFO_STATE_FAIL,
+};
+
+enum rq_cache_allocation {
+ RQ_CACHE_ALLOC_OFF,
+ RQ_CACHE_ALLOC_ALL,
+ RQ_CACHE_ALLOC_FIRST,
+ RQ_CACHE_ALLOC_TWO,
+};
+
+enum cq_rx_errlvl_e {
+ CQ_ERRLVL_MAC,
+ CQ_ERRLVL_L2,
+ CQ_ERRLVL_L3,
+ CQ_ERRLVL_L4,
+};
+
+enum cq_rx_errop_e {
+ CQ_RX_ERROP_RE_NONE,
+ CQ_RX_ERROP_RE_PARTIAL = 0x1,
+ CQ_RX_ERROP_RE_JABBER = 0x2,
+ CQ_RX_ERROP_RE_FCS = 0x7,
+ CQ_RX_ERROP_RE_TERMINATE = 0x9,
+ CQ_RX_ERROP_RE_RX_CTL = 0xb,
+ CQ_RX_ERROP_PREL2_ERR = 0x1f,
+ CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+ CQ_RX_ERROP_L2_OVERRUN = 0x21,
+ CQ_RX_ERROP_L2_PFCS = 0x22,
+ CQ_RX_ERROP_L2_PUNY = 0x23,
+ CQ_RX_ERROP_L2_MAL = 0x24,
+ CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+ CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+ CQ_RX_ERROP_L2_LENMISM = 0x27,
+ CQ_RX_ERROP_L2_PCLP = 0x28,
+ CQ_RX_ERROP_IP_NOT = 0x41,
+ CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+ CQ_RX_ERROP_IP_MAL = 0x43,
+ CQ_RX_ERROP_IP_MALD = 0x44,
+ CQ_RX_ERROP_IP_HOP = 0x45,
+ CQ_RX_ERROP_L3_ICRC = 0x46,
+ CQ_RX_ERROP_L3_PCLP = 0x47,
+ CQ_RX_ERROP_L4_MAL = 0x61,
+ CQ_RX_ERROP_L4_CHK = 0x62,
+ CQ_RX_ERROP_UDP_LEN = 0x63,
+ CQ_RX_ERROP_L4_PORT = 0x64,
+ CQ_RX_ERROP_TCP_FLAG = 0x65,
+ CQ_RX_ERROP_TCP_OFFSET = 0x66,
+ CQ_RX_ERROP_L4_PCLP = 0x67,
+ CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum cq_tx_errop_e {
+ CQ_TX_ERROP_GOOD,
+ CQ_TX_ERROP_DESC_FAULT = 0x10,
+ CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+ CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+ CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+ CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+ CQ_TX_ERROP_LOCK_VIOL = 0x83,
+ CQ_TX_ERROP_DATA_FAULT = 0x84,
+ CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+ CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+ CQ_TX_ERROP_MEM_FAULT = 0x87,
+ CQ_TX_ERROP_CK_OVERLAP = 0x88,
+ CQ_TX_ERROP_CK_OFLOW = 0x89,
+ CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+enum rq_sq_stats_reg_offset {
+ RQ_SQ_STATS_OCTS,
+ RQ_SQ_STATS_PKTS,
+};
+
+enum nic_stat_vnic_rx_e {
+ RX_OCTS,
+ RX_UCAST,
+ RX_BCAST,
+ RX_MCAST,
+ RX_RED,
+ RX_RED_OCTS,
+ RX_ORUN,
+ RX_ORUN_OCTS,
+ RX_FCS,
+ RX_L2ERR,
+ RX_DRP_BCAST,
+ RX_DRP_MCAST,
+ RX_DRP_L3BCAST,
+ RX_DRP_L3MCAST,
+};
+
+enum nic_stat_vnic_tx_e {
+ TX_OCTS,
+ TX_UCAST,
+ TX_BCAST,
+ TX_MCAST,
+ TX_DROP,
+};
+
+/* vNIC HW Register structures */
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t cqe_type:4;
+ uint64_t stdn_fault:1;
+ uint64_t rsvd0:1;
+ uint64_t rq_qs:7;
+ uint64_t rq_idx:3;
+ uint64_t rsvd1:12;
+ uint64_t rss_alg:4;
+ uint64_t rsvd2:4;
+ uint64_t rb_cnt:4;
+ uint64_t vlan_found:1;
+ uint64_t vlan_stripped:1;
+ uint64_t vlan2_found:1;
+ uint64_t vlan2_stripped:1;
+ uint64_t l4_type:4;
+ uint64_t l3_type:4;
+ uint64_t l2_present:1;
+ uint64_t err_level:3;
+ uint64_t err_opcode:8;
+#else
+ uint64_t err_opcode:8;
+ uint64_t err_level:3;
+ uint64_t l2_present:1;
+ uint64_t l3_type:4;
+ uint64_t l4_type:4;
+ uint64_t vlan2_stripped:1;
+ uint64_t vlan2_found:1;
+ uint64_t vlan_stripped:1;
+ uint64_t vlan_found:1;
+ uint64_t rb_cnt:4;
+ uint64_t rsvd2:4;
+ uint64_t rss_alg:4;
+ uint64_t rsvd1:12;
+ uint64_t rq_idx:3;
+ uint64_t rq_qs:7;
+ uint64_t rsvd0:1;
+ uint64_t stdn_fault:1;
+ uint64_t cqe_type:4;
+#endif
+ };
+} cqe_rx_word0_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t pkt_len:16;
+ uint64_t l2_ptr:8;
+ uint64_t l3_ptr:8;
+ uint64_t l4_ptr:8;
+ uint64_t cq_pkt_len:8;
+ uint64_t align_pad:3;
+ uint64_t rsvd3:1;
+ uint64_t chan:12;
+#else
+ uint64_t chan:12;
+ uint64_t rsvd3:1;
+ uint64_t align_pad:3;
+ uint64_t cq_pkt_len:8;
+ uint64_t l4_ptr:8;
+ uint64_t l3_ptr:8;
+ uint64_t l2_ptr:8;
+ uint64_t pkt_len:16;
+#endif
+ };
+} cqe_rx_word1_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t rss_tag:32;
+ uint64_t vlan_tci:16;
+ uint64_t vlan_ptr:8;
+ uint64_t vlan2_ptr:8;
+#else
+ uint64_t vlan2_ptr:8;
+ uint64_t vlan_ptr:8;
+ uint64_t vlan_tci:16;
+ uint64_t rss_tag:32;
+#endif
+ };
+} cqe_rx_word2_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t rb3_sz;
+ uint16_t rb2_sz;
+ uint16_t rb1_sz;
+ uint16_t rb0_sz;
+#else
+ uint16_t rb0_sz;
+ uint16_t rb1_sz;
+ uint16_t rb2_sz;
+ uint16_t rb3_sz;
+#endif
+ };
+} cqe_rx_word3_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t rb7_sz;
+ uint16_t rb6_sz;
+ uint16_t rb5_sz;
+ uint16_t rb4_sz;
+#else
+ uint16_t rb4_sz;
+ uint16_t rb5_sz;
+ uint16_t rb6_sz;
+ uint16_t rb7_sz;
+#endif
+ };
+} cqe_rx_word4_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint16_t rb11_sz;
+ uint16_t rb10_sz;
+ uint16_t rb9_sz;
+ uint16_t rb8_sz;
+#else
+ uint16_t rb8_sz;
+ uint16_t rb9_sz;
+ uint16_t rb10_sz;
+ uint16_t rb11_sz;
+#endif
+ };
+} cqe_rx_word5_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t vlan_found:1;
+ uint64_t vlan_stripped:1;
+ uint64_t vlan2_found:1;
+ uint64_t vlan2_stripped:1;
+ uint64_t rsvd2:3;
+ uint64_t inner_l2:1;
+ uint64_t inner_l4type:4;
+ uint64_t inner_l3type:4;
+ uint64_t vlan_ptr:8;
+ uint64_t vlan2_ptr:8;
+ uint64_t rsvd1:8;
+ uint64_t rsvd0:8;
+ uint64_t inner_l3ptr:8;
+ uint64_t inner_l4ptr:8;
+#else
+ uint64_t inner_l4ptr:8;
+ uint64_t inner_l3ptr:8;
+ uint64_t rsvd0:8;
+ uint64_t rsvd1:8;
+ uint64_t vlan2_ptr:8;
+ uint64_t vlan_ptr:8;
+ uint64_t inner_l3type:4;
+ uint64_t inner_l4type:4;
+ uint64_t inner_l2:1;
+ uint64_t rsvd2:3;
+ uint64_t vlan2_stripped:1;
+ uint64_t vlan2_found:1;
+ uint64_t vlan_stripped:1;
+ uint64_t vlan_found:1;
+#endif
+ };
+} cqe_rx2_word6_t;
+
+struct cqe_rx_t {
+ cqe_rx_word0_t word0;
+ cqe_rx_word1_t word1;
+ cqe_rx_word2_t word2;
+ cqe_rx_word3_t word3;
+ cqe_rx_word4_t word4;
+ cqe_rx_word5_t word5;
+ cqe_rx2_word6_t word6; /* if NIC_PF_RX_CFG[CQE_RX2_ENA] set */
+};
+
+struct cqe_rx_tcp_err_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t cqe_type:4; /* W0 */
+ uint64_t rsvd0:60;
+
+ uint64_t rsvd1:4; /* W1 */
+ uint64_t partial_first:1;
+ uint64_t rsvd2:27;
+ uint64_t rbdr_bytes:8;
+ uint64_t rsvd3:24;
+#else
+ uint64_t rsvd0:60;
+ uint64_t cqe_type:4;
+
+ uint64_t rsvd3:24;
+ uint64_t rbdr_bytes:8;
+ uint64_t rsvd2:27;
+ uint64_t partial_first:1;
+ uint64_t rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t cqe_type:4; /* W0 */
+ uint64_t rsvd0:52;
+ uint64_t cq_tcp_status:8;
+
+ uint64_t rsvd1:32; /* W1 */
+ uint64_t tcp_cntx_bytes:8;
+ uint64_t rsvd2:8;
+ uint64_t tcp_err_bytes:16;
+#else
+ uint64_t cq_tcp_status:8;
+ uint64_t rsvd0:52;
+ uint64_t cqe_type:4; /* W0 */
+
+ uint64_t tcp_err_bytes:16;
+ uint64_t rsvd2:8;
+ uint64_t tcp_cntx_bytes:8;
+ uint64_t rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t cqe_type:4; /* W0 */
+ uint64_t rsvd0:4;
+ uint64_t sqe_ptr:16;
+ uint64_t rsvd1:4;
+ uint64_t rsvd2:10;
+ uint64_t sq_qs:7;
+ uint64_t sq_idx:3;
+ uint64_t rsvd3:8;
+ uint64_t send_status:8;
+
+ uint64_t ptp_timestamp:64; /* W1 */
+#else
+ uint64_t send_status:8;
+ uint64_t rsvd3:8;
+ uint64_t sq_idx:3;
+ uint64_t sq_qs:7;
+ uint64_t rsvd2:10;
+ uint64_t rsvd1:4;
+ uint64_t sqe_ptr:16;
+ uint64_t rsvd0:4;
+ uint64_t cqe_type:4; /* W0 */
+
+ uint64_t ptp_timestamp:64;
+#endif
+};
+
+struct cq_entry_type_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t cqe_type:4;
+ uint64_t __pad:60;
+#else
+ uint64_t __pad:60;
+ uint64_t cqe_type:4;
+#endif
+};
+
+union cq_entry_t {
+ uint64_t u[64];
+ struct cq_entry_type_t type;
+ struct cqe_rx_t rx_hdr;
+ struct cqe_rx_tcp_t rx_tcp_hdr;
+ struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+ struct cqe_send_t cqe_send;
+};
+
+NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
+
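+/*
+ * Illustrative poll-side dispatch (a sketch; the handler names are
+ * placeholders, not part of this driver):
+ *
+ *	union cq_entry_t *cqe = &cq_ring[head & qlen_mask];
+ *
+ *	switch (cqe->type.cqe_type) {
+ *	case CQE_TYPE_RX:
+ *		handle_rx(&cqe->rx_hdr);
+ *		break;
+ *	case CQE_TYPE_SEND:
+ *		handle_send(&cqe->cqe_send);
+ *		break;
+ *	}
+ */
+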
+struct rbdr_entry_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ union {
+ struct {
+ uint64_t rsvd0:15;
+ uint64_t buf_addr:42;
+ uint64_t cache_align:7;
+ };
+ nicvf_phys_addr_t full_addr;
+ };
+#else
+ union {
+ struct {
+ uint64_t cache_align:7;
+ uint64_t buf_addr:42;
+ uint64_t rsvd0:15;
+ };
+ nicvf_phys_addr_t full_addr;
+ };
+#endif
+};
+
+NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
+
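+/*
+ * Note: with cache_align:7 reserved, a receive buffer's physical address
+ * must be 128-byte aligned; such an address fits the buf_addr field and
+ * can be stored in one write, e.g. entry->full_addr = phys (a sketch,
+ * assuming phys is aligned and below 2^49).
+ */
+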
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t tcp_pkt_cnt:12;
+ uint64_t rsvd1:4;
+ uint64_t align_hdr_bytes:4;
+ uint64_t align_ptr_bytes:4;
+ uint64_t ptr_bytes:16;
+ uint64_t rsvd2:24;
+ uint64_t cqe_type:4;
+ uint64_t rsvd0:54;
+ uint64_t tcp_end_reason:2;
+ uint64_t tcp_status:4;
+#else
+ uint64_t tcp_status:4;
+ uint64_t tcp_end_reason:2;
+ uint64_t rsvd0:54;
+ uint64_t cqe_type:4;
+ uint64_t rsvd2:24;
+ uint64_t ptr_bytes:16;
+ uint64_t align_ptr_bytes:4;
+ uint64_t align_hdr_bytes:4;
+ uint64_t rsvd1:4;
+ uint64_t tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+ uint64_t opaque:32;
+ uint64_t rss_flow:8;
+ uint64_t skip_length:6;
+ uint64_t disable_rss:1;
+ uint64_t disable_tcp_reassembly:1;
+ uint64_t nodrop:1;
+ uint64_t dest_alg:2;
+ uint64_t rsvd0:2;
+ uint64_t dest_rq:11;
+};
+
+struct sq_crc_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t rsvd1:32;
+ uint64_t crc_ival:32;
+ uint64_t subdesc_type:4;
+ uint64_t crc_alg:2;
+ uint64_t rsvd0:10;
+ uint64_t crc_insert_pos:16;
+ uint64_t hdr_start:16;
+ uint64_t crc_len:16;
+#else
+ uint64_t crc_len:16;
+ uint64_t hdr_start:16;
+ uint64_t crc_insert_pos:16;
+ uint64_t rsvd0:10;
+ uint64_t crc_alg:2;
+ uint64_t subdesc_type:4;
+ uint64_t crc_ival:32;
+ uint64_t rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t subdesc_type:4; /* W0 */
+ uint64_t ld_type:2;
+ uint64_t rsvd0:42;
+ uint64_t size:16;
+
+ uint64_t rsvd1:15; /* W1 */
+ uint64_t addr:49;
+#else
+ uint64_t size:16;
+ uint64_t rsvd0:42;
+ uint64_t ld_type:2;
+ uint64_t subdesc_type:4; /* W0 */
+
+ uint64_t addr:49;
+ uint64_t rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t subdesc_type:4; /* W0 */
+ uint64_t rsvd0:46;
+ uint64_t len:14;
+
+ uint64_t data:64; /* W1 */
+#else
+ uint64_t len:14;
+ uint64_t rsvd0:46;
+ uint64_t subdesc_type:4; /* W0 */
+
+ uint64_t data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t subdesc_type:4; /* W0 */
+ uint64_t mem_alg:4;
+ uint64_t mem_dsz:2;
+ uint64_t wmem:1;
+ uint64_t rsvd0:21;
+ uint64_t offset:32;
+
+ uint64_t rsvd1:15; /* W1 */
+ uint64_t addr:49;
+#else
+ uint64_t offset:32;
+ uint64_t rsvd0:21;
+ uint64_t wmem:1;
+ uint64_t mem_dsz:2;
+ uint64_t mem_alg:4;
+ uint64_t subdesc_type:4; /* W0 */
+
+ uint64_t addr:49;
+ uint64_t rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t subdesc_type:4;
+ uint64_t tso:1;
+ uint64_t post_cqe:1; /* Post CQE on no error also */
+ uint64_t dont_send:1;
+ uint64_t tstmp:1;
+ uint64_t subdesc_cnt:8;
+ uint64_t csum_l4:2;
+ uint64_t csum_l3:1;
+ uint64_t csum_inner_l4:2;
+ uint64_t csum_inner_l3:1;
+ uint64_t rsvd0:2;
+ uint64_t l4_offset:8;
+ uint64_t l3_offset:8;
+ uint64_t rsvd1:4;
+ uint64_t tot_len:20; /* W0 */
+
+ uint64_t rsvd2:24;
+ uint64_t inner_l4_offset:8;
+ uint64_t inner_l3_offset:8;
+ uint64_t tso_start:8;
+ uint64_t rsvd3:2;
+ uint64_t tso_max_paysize:14; /* W1 */
+#else
+ uint64_t tot_len:20;
+ uint64_t rsvd1:4;
+ uint64_t l3_offset:8;
+ uint64_t l4_offset:8;
+ uint64_t rsvd0:2;
+ uint64_t csum_inner_l3:1;
+ uint64_t csum_inner_l4:2;
+ uint64_t csum_l3:1;
+ uint64_t csum_l4:2;
+ uint64_t subdesc_cnt:8;
+ uint64_t tstmp:1;
+ uint64_t dont_send:1;
+ uint64_t post_cqe:1; /* Post CQE on no error also */
+ uint64_t tso:1;
+ uint64_t subdesc_type:4; /* W0 */
+
+ uint64_t tso_max_paysize:14;
+ uint64_t rsvd3:2;
+ uint64_t tso_start:8;
+ uint64_t inner_l3_offset:8;
+ uint64_t inner_l4_offset:8;
+ uint64_t rsvd2:24; /* W1 */
+#endif
+};
+
+/* Each sq entry is 128 bits wide */
+union sq_entry_t {
+ uint64_t buff[2];
+ struct sq_hdr_subdesc hdr;
+ struct sq_imm_subdesc imm;
+ struct sq_gather_subdesc gather;
+ struct sq_crc_subdesc crc;
+ struct sq_mem_subdesc mem;
+};
+
+NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
+
+/* Queue config register formats */
+struct rq_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t reserved_2_63:62;
+ uint64_t ena:1;
+ uint64_t reserved_0:1;
+#else
+ uint64_t reserved_0:1;
+ uint64_t ena:1;
+ uint64_t reserved_2_63:62;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct cq_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t reserved_43_63:21;
+ uint64_t ena:1;
+ uint64_t reset:1;
+ uint64_t caching:1;
+ uint64_t reserved_35_39:5;
+ uint64_t qsize:3;
+ uint64_t reserved_25_31:7;
+ uint64_t avg_con:9;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t avg_con:9;
+ uint64_t reserved_25_31:7;
+ uint64_t qsize:3;
+ uint64_t reserved_35_39:5;
+ uint64_t caching:1;
+ uint64_t reset:1;
+ uint64_t ena:1;
+ uint64_t reserved_43_63:21;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct sq_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t reserved_20_63:44;
+ uint64_t ena:1;
+ uint64_t reserved_18_18:1;
+ uint64_t reset:1;
+ uint64_t ldwb:1;
+ uint64_t reserved_11_15:5;
+ uint64_t qsize:3;
+ uint64_t reserved_3_7:5;
+ uint64_t tstmp_bgx_intf:3;
+#else
+ uint64_t tstmp_bgx_intf:3;
+ uint64_t reserved_3_7:5;
+ uint64_t qsize:3;
+ uint64_t reserved_11_15:5;
+ uint64_t ldwb:1;
+ uint64_t reset:1;
+ uint64_t reserved_18_18:1;
+ uint64_t ena:1;
+ uint64_t reserved_20_63:44;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct rbdr_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t reserved_45_63:19;
+ uint64_t ena:1;
+ uint64_t reset:1;
+ uint64_t ldwb:1;
+ uint64_t reserved_36_41:6;
+ uint64_t qsize:4;
+ uint64_t reserved_25_31:7;
+ uint64_t avg_con:9;
+ uint64_t reserved_12_15:4;
+ uint64_t lines:12;
+#else
+ uint64_t lines:12;
+ uint64_t reserved_12_15:4;
+ uint64_t avg_con:9;
+ uint64_t reserved_25_31:7;
+ uint64_t qsize:4;
+ uint64_t reserved_36_41:6;
+ uint64_t ldwb:1;
+ uint64_t reset:1;
+	uint64_t ena:1;
+ uint64_t reserved_45_63:19;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct pf_qs_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t reserved_32_63:32;
+ uint64_t ena:1;
+ uint64_t reserved_27_30:4;
+ uint64_t sq_ins_ena:1;
+ uint64_t sq_ins_pos:6;
+ uint64_t lock_ena:1;
+ uint64_t lock_viol_cqe_ena:1;
+ uint64_t send_tstmp_ena:1;
+ uint64_t be:1;
+ uint64_t reserved_7_15:9;
+ uint64_t vnic:7;
+#else
+ uint64_t vnic:7;
+ uint64_t reserved_7_15:9;
+ uint64_t be:1;
+ uint64_t send_tstmp_ena:1;
+ uint64_t lock_viol_cqe_ena:1;
+ uint64_t lock_ena:1;
+ uint64_t sq_ins_pos:6;
+ uint64_t sq_ins_ena:1;
+ uint64_t reserved_27_30:4;
+ uint64_t ena:1;
+ uint64_t reserved_32_63:32;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct pf_rq_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t reserved1:1;
+ uint64_t reserved0:34;
+ uint64_t strip_pre_l2:1;
+ uint64_t caching:2;
+ uint64_t cq_qs:7;
+ uint64_t cq_idx:3;
+ uint64_t rbdr_cont_qs:7;
+ uint64_t rbdr_cont_idx:1;
+ uint64_t rbdr_strt_qs:7;
+ uint64_t rbdr_strt_idx:1;
+#else
+ uint64_t rbdr_strt_idx:1;
+ uint64_t rbdr_strt_qs:7;
+ uint64_t rbdr_cont_idx:1;
+ uint64_t rbdr_cont_qs:7;
+ uint64_t cq_idx:3;
+ uint64_t cq_qs:7;
+ uint64_t caching:2;
+ uint64_t strip_pre_l2:1;
+ uint64_t reserved0:34;
+ uint64_t reserved1:1;
+#endif
+ };
+ uint64_t value;
+}; };
+
+struct pf_rq_drop_cfg { union { struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t rbdr_red:1;
+ uint64_t cq_red:1;
+ uint64_t reserved3:14;
+ uint64_t rbdr_pass:8;
+ uint64_t rbdr_drop:8;
+ uint64_t reserved2:8;
+ uint64_t cq_pass:8;
+ uint64_t cq_drop:8;
+ uint64_t reserved1:8;
+#else
+ uint64_t reserved1:8;
+ uint64_t cq_drop:8;
+ uint64_t cq_pass:8;
+ uint64_t reserved2:8;
+ uint64_t rbdr_drop:8;
+ uint64_t rbdr_pass:8;
+ uint64_t reserved3:14;
+ uint64_t cq_red:1;
+ uint64_t rbdr_red:1;
+#endif
+ };
+ uint64_t value;
+}; };
+
+#endif /* _THUNDERX_NICVF_HW_DEFS_H */
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
new file mode 100644
index 00000000..9c5cd834
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -0,0 +1,418 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "nicvf_plat.h"
+
+#define NICVF_MBOX_PF_RESPONSE_DELAY_US (1000)
+
+static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
+ [NIC_MBOX_MSG_INVALID] = "NIC_MBOX_MSG_INVALID",
+ [NIC_MBOX_MSG_READY] = "NIC_MBOX_MSG_READY",
+ [NIC_MBOX_MSG_ACK] = "NIC_MBOX_MSG_ACK",
+	[NIC_MBOX_MSG_NACK]		= "NIC_MBOX_MSG_NACK",
+ [NIC_MBOX_MSG_QS_CFG] = "NIC_MBOX_MSG_QS_CFG",
+ [NIC_MBOX_MSG_RQ_CFG] = "NIC_MBOX_MSG_RQ_CFG",
+ [NIC_MBOX_MSG_SQ_CFG] = "NIC_MBOX_MSG_SQ_CFG",
+ [NIC_MBOX_MSG_RQ_DROP_CFG] = "NIC_MBOX_MSG_RQ_DROP_CFG",
+ [NIC_MBOX_MSG_SET_MAC] = "NIC_MBOX_MSG_SET_MAC",
+ [NIC_MBOX_MSG_SET_MAX_FRS] = "NIC_MBOX_MSG_SET_MAX_FRS",
+ [NIC_MBOX_MSG_CPI_CFG] = "NIC_MBOX_MSG_CPI_CFG",
+ [NIC_MBOX_MSG_RSS_SIZE] = "NIC_MBOX_MSG_RSS_SIZE",
+ [NIC_MBOX_MSG_RSS_CFG] = "NIC_MBOX_MSG_RSS_CFG",
+ [NIC_MBOX_MSG_RSS_CFG_CONT] = "NIC_MBOX_MSG_RSS_CFG_CONT",
+ [NIC_MBOX_MSG_RQ_BP_CFG] = "NIC_MBOX_MSG_RQ_BP_CFG",
+ [NIC_MBOX_MSG_RQ_SW_SYNC] = "NIC_MBOX_MSG_RQ_SW_SYNC",
+ [NIC_MBOX_MSG_BGX_LINK_CHANGE] = "NIC_MBOX_MSG_BGX_LINK_CHANGE",
+ [NIC_MBOX_MSG_ALLOC_SQS] = "NIC_MBOX_MSG_ALLOC_SQS",
+ [NIC_MBOX_MSG_LOOPBACK] = "NIC_MBOX_MSG_LOOPBACK",
+ [NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
+ [NIC_MBOX_MSG_CFG_DONE] = "NIC_MBOX_MSG_CFG_DONE",
+ [NIC_MBOX_MSG_SHUTDOWN] = "NIC_MBOX_MSG_SHUTDOWN",
+};
+
+static inline const char * __attribute__((unused))
+nicvf_mbox_msg_str(int msg)
+{
+ assert(msg >= 0 && msg < NIC_MBOX_MSG_MAX);
+ /* undefined messages */
+ if (mbox_message[msg] == NULL)
+ msg = 0;
+ return mbox_message[msg];
+}
+
+static inline void
+nicvf_mbox_send_msg_to_pf_raw(struct nicvf *nic, struct nic_mbx *mbx)
+{
+ uint64_t *mbx_data;
+ uint64_t mbx_addr;
+ int i;
+
+ mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ mbx_data = (uint64_t *)mbx;
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ nicvf_reg_write(nic, mbx_addr, *mbx_data);
+ mbx_data++;
+ mbx_addr += sizeof(uint64_t);
+ }
+ nicvf_mbox_log("msg sent %s (VF%d)",
+ nicvf_mbox_msg_str(mbx->msg.msg), nic->vf_id);
+}
+
+static inline void
+nicvf_mbox_send_async_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
+{
+ nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
+	/* Messages without an ack are racy! */
+ nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
+}
+
+static inline int
+nicvf_mbox_send_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx)
+{
+ long timeout;
+ long sleep = 10;
+ int i, retry = 5;
+
+ for (i = 0; i < retry; i++) {
+ nic->pf_acked = false;
+ nic->pf_nacked = false;
+ nicvf_smp_wmb();
+
+ nicvf_mbox_send_msg_to_pf_raw(nic, mbx);
+ /* Give some time to get PF response */
+ nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
+ timeout = NIC_MBOX_MSG_TIMEOUT;
+ while (timeout > 0) {
+ /* Periodic poll happens from nicvf_interrupt() */
+ nicvf_smp_rmb();
+
+ if (nic->pf_nacked)
+ return -EINVAL;
+ if (nic->pf_acked)
+ return 0;
+
+ nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US);
+ timeout -= sleep;
+ }
+		nicvf_log_error("PF didn't ack msg 0x%02x %s VF%d (%d/%d)",
+ mbx->msg.msg, nicvf_mbox_msg_str(mbx->msg.msg),
+ nic->vf_id, i, retry);
+ }
+ return -EBUSY;
+}
+
+
+int
+nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+ struct nic_mbx mbx;
+ uint64_t *mbx_data = (uint64_t *)&mbx;
+ uint64_t mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ size_t i;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nicvf_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(uint64_t);
+ }
+
+ /* Overwrite the message so we won't receive it again */
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, 0x0);
+
+ nicvf_mbox_log("msg received id=0x%hhx %s (VF%d)", mbx.msg.msg,
+ nicvf_mbox_msg_str(mbx.msg.msg), nic->vf_id);
+
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+ nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+ nic->node = mbx.nic_cfg.node_id;
+ nic->sqs_mode = mbx.nic_cfg.sqs_mode;
+ nic->loopback_supported = mbx.nic_cfg.loopback_supported;
+ ether_addr_copy((struct ether_addr *)mbx.nic_cfg.mac_addr,
+ (struct ether_addr *)nic->mac_addr);
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_ACK:
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_NACK:
+ nic->pf_nacked = true;
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ nic->pf_acked = true;
+ break;
+ default:
+ nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s",
+ mbx.msg.msg, nicvf_mbox_msg_str(mbx.msg.msg));
+ break;
+ }
+ nicvf_smp_wmb();
+
+ return mbx.msg.msg;
+}
+
+/*
+ * Checks if VF is able to communicate with PF
+ * and also gets the VNIC number this VF is associated with.
+ */
+int
+nicvf_mbox_check_pf_ready(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = {.msg = NIC_MBOX_MSG_READY} };
+
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_set_mac_addr(struct nicvf *nic,
+ const uint8_t mac[NICVF_MAC_ADDR_SIZE])
+{
+ struct nic_mbx mbx = { .msg = {0} };
+ int i;
+
+ mbx.msg.msg = NIC_MBOX_MSG_SET_MAC;
+ mbx.mac.vf_id = nic->vf_id;
+	for (i = 0; i < NICVF_MAC_ADDR_SIZE; i++)
+ mbx.mac.mac_addr[i] = mac[i];
+
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_CPI_CFG;
+ mbx.cpi_cfg.vf_id = nic->vf_id;
+ mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+ mbx.cpi_cfg.rq_cnt = qcnt;
+
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_get_rss_size(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.vf_id = nic->vf_id;
+
+ /* Result will be stored in nic->rss_info.rss_size */
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_config_rss(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+ struct nicvf_rss_reta_info *rss = &nic->rss_info;
+ size_t tot_len = rss->rss_size;
+ size_t cur_len;
+ size_t cur_idx = 0;
+ size_t i;
+
+ mbx.rss_cfg.vf_id = nic->vf_id;
+ mbx.rss_cfg.hash_bits = rss->hash_bits;
+ mbx.rss_cfg.tbl_len = 0;
+ mbx.rss_cfg.tbl_offset = 0;
+
+ while (cur_idx < tot_len) {
+ cur_len = nicvf_min(tot_len - cur_idx,
+ (size_t)RSS_IND_TBL_LEN_PER_MBX_MSG);
+ mbx.msg.msg = (cur_idx > 0) ?
+ NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+ mbx.rss_cfg.tbl_offset = cur_idx;
+ mbx.rss_cfg.tbl_len = cur_len;
+ for (i = 0; i < cur_len; i++)
+ mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[cur_idx++];
+
+ if (nicvf_mbox_send_msg_to_pf(nic, &mbx))
+ return NICVF_ERR_RSS_TBL_UPDATE;
+ }
+
+ return 0;
+}
+
+int
+nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
+ struct pf_rq_cfg *pf_rq_cfg)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_CFG;
+ mbx.rq.qs_num = nic->vf_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = pf_rq_cfg->value;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_SQ_CFG;
+ mbx.sq.qs_num = nic->vf_id;
+ mbx.sq.sq_num = qidx;
+ mbx.sq.sqs_mode = nic->sqs_mode;
+ mbx.sq.cfg = (nic->vf_id << 3) | qidx;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ qs_cfg->be = 1;
+#endif
+ /* Send a mailbox msg to PF to config Qset */
+ mbx.msg.msg = NIC_MBOX_MSG_QS_CFG;
+ mbx.qs.num = nic->vf_id;
+ mbx.qs.cfg = qs_cfg->value;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+ struct pf_rq_drop_cfg *drop_cfg;
+
+ /* Enable CQ drop to reserve sufficient CQEs for all tx packets */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+ mbx.rq.qs_num = nic->vf_id;
+ mbx.rq.rq_num = qidx;
+ drop_cfg = (struct pf_rq_drop_cfg *)&mbx.rq.cfg;
+ drop_cfg->value = 0;
+ if (enable) {
+ drop_cfg->cq_red = 1;
+ drop_cfg->cq_drop = 2;
+ }
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_sync(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ mbx.rq.cfg = 0;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+ mbx.rq.qs_num = nic->vf_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = 0;
+ if (enable)
+ mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (nic->vf_id << 0);
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_loopback_config(struct nicvf *nic, bool enable)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
+ mbx.lbk.vf_id = nic->vf_id;
+ mbx.lbk.enable = enable;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
+nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
+ uint8_t tx_stat_mask, uint16_t rq_stat_mask,
+ uint16_t sq_stat_mask)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rx_stat_mask = rx_stat_mask;
+ mbx.reset_stat.tx_stat_mask = tx_stat_mask;
+ mbx.reset_stat.rq_stat_mask = rq_stat_mask;
+ mbx.reset_stat.sq_stat_mask = sq_stat_mask;
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+void
+nicvf_mbox_shutdown(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+ nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+void
+nicvf_mbox_cfg_done(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ nicvf_mbox_send_async_msg_to_pf(nic, &mbx);
+}
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
new file mode 100644
index 00000000..7c0c6a97
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -0,0 +1,232 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __THUNDERX_NICVF_MBOX__
+#define __THUNDERX_NICVF_MBOX__
+
+#include <stdint.h>
+
+#include "nicvf_plat.h"
+
+/* PF <--> VF Mailbox communication
+ * Two 64-bit registers are shared between the PF and each VF.
+ * Writing into the second register signals the end of the message.
+ */
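+/*
+ * Send sequence implied by the note above (a sketch; see
+ * nicvf_mbox_send_msg_to_pf_raw() in nicvf_mbox.c for the real loop):
+ *
+ *	uint64_t *w = (uint64_t *)&mbx;
+ *
+ *	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, w[0]);
+ *	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, w[1]);
+ *
+ * The second store is what completes the message on the PF side.
+ */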
+
+/* PF <--> VF mailbox communication */
+#define NIC_PF_VF_MAILBOX_SIZE 2
+#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
+
+/* Mailbox message types */
+#define NIC_MBOX_MSG_INVALID 0x00 /* Invalid message */
+#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
+#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
+#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
+#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
+#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
+#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
+#define NIC_MBOX_MSG_RQ_DROP_CFG	0x07	/* Configure RQ drop levels */
+#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
+#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
+#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
+#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
+#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
+#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
+#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
+#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
+#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
+#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
+#define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */
+
+/* Get vNIC VF configuration */
+struct nic_cfg_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint8_t node_id;
+ bool tns_mode:1;
+ bool sqs_mode:1;
+ bool loopback_supported:1;
+ uint8_t mac_addr[NICVF_MAC_ADDR_SIZE];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+ uint8_t msg;
+ uint8_t num;
+ uint8_t sqs_count;
+ uint64_t cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+ uint8_t msg;
+ uint8_t qs_num;
+ uint8_t rq_num;
+ uint64_t cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+ uint8_t msg;
+ uint8_t qs_num;
+ uint8_t sq_num;
+ bool sqs_mode;
+ uint64_t cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint8_t mac_addr[NICVF_MAC_ADDR_SIZE];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint16_t max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint8_t rq_cnt;
+ uint8_t cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint16_t ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+ uint8_t msg;
+ uint8_t vf_id;
+ uint8_t hash_bits;
+ uint8_t tbl_len;
+ uint8_t tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
+ uint8_t ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+ uint8_t msg;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint32_t speed;
+};
+
+/* Set interface in loopback mode */
+struct set_loopback {
+ uint8_t msg;
+ uint8_t vf_id;
+ bool enable;
+};
+
+/* Reset statistics counters */
+struct reset_stat_cfg {
+ uint8_t msg;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+ uint16_t rx_stat_mask;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+ uint8_t tx_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+ */
+ uint16_t rq_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+ */
+ uint16_t sq_stat_mask;
+};
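+
+/*
+ * Usage sketch mirroring nicvf_dev_stats_reset() in nicvf_ethdev.c: two
+ * STAT bits are set per queue, so clearing the counters of RQ0..RQn-1
+ * builds the mask as:
+ *
+ *	uint16_t rq_mask = 0;
+ *
+ *	for (i = 0; i < nb_rx_queues; i++)
+ *		rq_mask |= 0x3 << (i * 2);
+ */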
+
+struct nic_mbx {
+/* 128 bit shared memory between PF and each VF */
+union {
+ struct { uint8_t msg; } msg;
+ struct nic_cfg_msg nic_cfg;
+ struct qs_cfg_msg qs;
+ struct rq_cfg_msg rq;
+ struct sq_cfg_msg sq;
+ struct set_mac_msg mac;
+ struct set_frs_msg frs;
+ struct cpi_cfg_msg cpi_cfg;
+ struct rss_sz_msg rss_size;
+ struct rss_cfg_msg rss_cfg;
+ struct bgx_link_status link_status;
+ struct set_loopback lbk;
+ struct reset_stat_cfg reset_stat;
+};
+};
+
+NICVF_STATIC_ASSERT(sizeof(struct nic_mbx) <= 16);
+
+int nicvf_handle_mbx_intr(struct nicvf *nic);
+int nicvf_mbox_check_pf_ready(struct nicvf *nic);
+int nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg);
+int nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
+ struct pf_rq_cfg *pf_rq_cfg);
+int nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx);
+int nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable);
+int nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable);
+int nicvf_mbox_set_mac_addr(struct nicvf *nic,
+ const uint8_t mac[NICVF_MAC_ADDR_SIZE]);
+int nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt);
+int nicvf_mbox_get_rss_size(struct nicvf *nic);
+int nicvf_mbox_config_rss(struct nicvf *nic);
+int nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu);
+int nicvf_mbox_rq_sync(struct nicvf *nic);
+int nicvf_mbox_loopback_config(struct nicvf *nic, bool enable);
+int nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask,
+ uint8_t tx_stat_mask, uint16_t rq_stat_mask, uint16_t sq_stat_mask);
+void nicvf_mbox_shutdown(struct nicvf *nic);
+void nicvf_mbox_cfg_done(struct nicvf *nic);
+
+#endif /* __THUNDERX_NICVF_MBOX__ */
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
new file mode 100644
index 00000000..83c1844d
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -0,0 +1,132 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _THUNDERX_NICVF_H
+#define _THUNDERX_NICVF_H
+
+/* Platform/OS/arch specific abstractions */
+
+/* log */
+#include <rte_log.h>
+#include "../nicvf_logs.h"
+
+#define nicvf_log_error(s, ...) PMD_DRV_LOG(ERR, s, ##__VA_ARGS__)
+
+#define nicvf_log_debug(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#define nicvf_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__)
+
+#define nicvf_log(s, ...) fprintf(stderr, s, ##__VA_ARGS__)
+
+/* delay */
+#include <rte_cycles.h>
+#define nicvf_delay_us(x) rte_delay_us(x)
+
+/* barrier */
+#include <rte_atomic.h>
+#define nicvf_smp_wmb() rte_smp_wmb()
+#define nicvf_smp_rmb() rte_smp_rmb()
+
+/* utils */
+#include <rte_common.h>
+#define nicvf_min(x, y) RTE_MIN(x, y)
+
+/* byte order */
+#include <rte_byteorder.h>
+#define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x)
+#define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x)
+
+/* Constants */
+#include <rte_ether.h>
+#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define nicvf_prefetch_store_keep(_ptr) ({\
+ asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
+
+static inline void __attribute__((always_inline))
+nicvf_addr_write(uintptr_t addr, uint64_t val)
+{
+ asm volatile(
+ "str %x[val], [%x[addr]]"
+ :
+ : [val] "r" (val), [addr] "r" (addr));
+}
+
+static inline uint64_t __attribute__((always_inline))
+nicvf_addr_read(uintptr_t addr)
+{
+ uint64_t val;
+
+ asm volatile(
+ "ldr %x[val], [%x[addr]]"
+ : [val] "=r" (val)
+ : [addr] "r" (addr));
+ return val;
+}
+
+#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \
+ asm volatile( \
+ "ldp %x[x1], %x[x0], [%x[p1]]" \
+ : [x1]"=r"(reg1), [x0]"=r"(reg2)\
+ : [p1]"r"(addr) \
+ ); })
+
+#else /* non-optimized fallbacks for building on non-arm64 archs */
+
+#define nicvf_prefetch_store_keep(_ptr) do {} while (0)
+
+static inline void __attribute__((always_inline))
+nicvf_addr_write(uintptr_t addr, uint64_t val)
+{
+ *(volatile uint64_t *)addr = val;
+}
+
+static inline uint64_t __attribute__((always_inline))
+nicvf_addr_read(uintptr_t addr)
+{
+ return *(volatile uint64_t *)addr;
+}
+
+#define NICVF_LOAD_PAIR(reg1, reg2, addr) \
+do { \
+ reg1 = nicvf_addr_read((uintptr_t)addr); \
+ reg2 = nicvf_addr_read((uintptr_t)addr + 8); \
+} while (0)
+
+#endif
+
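+/*
+ * Usage sketch for NICVF_LOAD_PAIR (hypothetical call site): it reads two
+ * adjacent 64-bit words, as a single ldp on arm64:
+ *
+ *	uint64_t w0, w1;
+ *
+ *	NICVF_LOAD_PAIR(w0, w1, &cqe->u[0]);
+ *
+ * which leaves w0 = u[0] and w1 = u[1] on both implementations.
+ */
+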
+#include "nicvf_hw.h"
+#include "nicvf_mbox.h"
+
+#endif /* _THUNDERX_NICVF_H */
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
new file mode 100644
index 00000000..48ed3812
--- /dev/null
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -0,0 +1,1791 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <sys/queue.h>
+#include <sys/timerfd.h>
+
+#include <rte_alarm.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_pci.h>
+#include <rte_tailq.h>
+
+#include "base/nicvf_plat.h"
+
+#include "nicvf_ethdev.h"
+#include "nicvf_rxtx.h"
+#include "nicvf_logs.h"
+
+static void nicvf_dev_stop(struct rte_eth_dev *dev);
+
+static inline int
+nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static inline void
+nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
+{
+ link->link_status = nic->link_up;
+ link->link_duplex = ETH_LINK_AUTONEG;
+ if (nic->duplex == NICVF_HALF_DUPLEX)
+ link->link_duplex = ETH_LINK_HALF_DUPLEX;
+ else if (nic->duplex == NICVF_FULL_DUPLEX)
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_speed = nic->speed;
+ link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
+}
+
+static void
+nicvf_interrupt(void *arg)
+{
+ struct nicvf *nic = arg;
+
+ if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
+ if (nic->eth_dev->data->dev_conf.intr_conf.lsc)
+ nicvf_set_eth_link_status(nic,
+ &nic->eth_dev->data->dev_link);
+ _rte_eth_dev_callback_process(nic->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC);
+ }
+
+ rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
+ nicvf_interrupt, nic);
+}
+
+static int
+nicvf_periodic_alarm_start(struct nicvf *nic)
+{
+ return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
+ nicvf_interrupt, nic);
+}
+
+static int
+nicvf_periodic_alarm_stop(struct nicvf *nic)
+{
+ return rte_eal_alarm_cancel(nicvf_interrupt, nic);
+}
+
+/*
+ * Return 0 if the link status changed, -1 if it did not change.
+ */
+static int
+nicvf_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct rte_eth_link link;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&link, 0, sizeof(link));
+ nicvf_set_eth_link_status(nic, &link);
+ return nicvf_atomic_write_link_status(dev, &link);
+}
+
+static int
+nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (frame_size > NIC_HW_MAX_FRS)
+ return -EINVAL;
+
+ if (frame_size < NIC_HW_MIN_FRS)
+ return -EINVAL;
+
+ buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+
+ /*
+	 * Refuse an MTU that requires scattered-packet support
+	 * when that feature has not been enabled beforehand.
+ */
+ if (!dev->data->scattered_rx &&
+ (frame_size + 2 * VLAN_TAG_SIZE > buffsz))
+ return -EINVAL;
+
+ /* check <seg size> * <max_seg> >= max_frame */
+ if (dev->data->scattered_rx &&
+ (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
+ return -EINVAL;
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev->data->dev_conf.rxmode.jumbo_frame = 0;
+
+ if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
+ return -EINVAL;
+
+ /* Update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
+ nic->mtu = mtu;
+ return 0;
+}
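+
+/*
+ * Worked example for the checks above: the standard 1500-byte MTU gives
+ * frame_size = 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518,
+ * which sits within NIC_HW_MIN_FRS..NIC_HW_MAX_FRS and does not exceed
+ * ETHER_MAX_LEN (1518), so jumbo_frame stays disabled.
+ */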
+
+static int
+nicvf_dev_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ return nicvf_reg_get_count();
+}
+
+static int
+nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
+{
+ uint64_t *data = regs->data;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (data == NULL)
+ return -EINVAL;
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)nicvf_reg_get_count())) {
+ regs->version = nic->vendor_id << 16 | nic->device_id;
+ nicvf_reg_dump(nic, data);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static void
+nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint16_t qidx;
+ struct nicvf_hw_rx_qstats rx_qstats;
+ struct nicvf_hw_tx_qstats tx_qstats;
+ struct nicvf_hw_stats port_stats;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ /* Reading per RX ring stats */
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
+ stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
+ stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
+ }
+
+ /* Reading per TX ring stats */
+ for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
+ if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
+ stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
+ stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
+ }
+
+ nicvf_hw_get_stats(nic, &port_stats);
+ stats->ibytes = port_stats.rx_bytes;
+ stats->ipackets = port_stats.rx_ucast_frames;
+ stats->ipackets += port_stats.rx_bcast_frames;
+ stats->ipackets += port_stats.rx_mcast_frames;
+ stats->ierrors = port_stats.rx_l2_errors;
+ stats->imissed = port_stats.rx_drop_red;
+ stats->imissed += port_stats.rx_drop_overrun;
+ stats->imissed += port_stats.rx_drop_bcast;
+ stats->imissed += port_stats.rx_drop_mcast;
+ stats->imissed += port_stats.rx_drop_l3_bcast;
+ stats->imissed += port_stats.rx_drop_l3_mcast;
+
+ stats->obytes = port_stats.tx_bytes_ok;
+ stats->opackets = port_stats.tx_ucast_frames_ok;
+ stats->opackets += port_stats.tx_bcast_frames_ok;
+ stats->opackets += port_stats.tx_mcast_frames_ok;
+ stats->oerrors = port_stats.tx_drops;
+}
+
+static const uint32_t *
+nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ size_t copied;
+ static uint32_t ptypes[32];
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ static const uint32_t ptypes_pass1[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ };
+ static const uint32_t ptypes_pass2[] = {
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_VXLAN,
+ RTE_PTYPE_TUNNEL_NVGRE,
+ };
+ static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
+
+ copied = sizeof(ptypes_pass1);
+ memcpy(ptypes, ptypes_pass1, copied);
+ if (nicvf_hw_version(nic) == NICVF_PASS2) {
+ memcpy((char *)ptypes + copied, ptypes_pass2,
+ sizeof(ptypes_pass2));
+ copied += sizeof(ptypes_pass2);
+ }
+
+ memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
+ if (dev->rx_pkt_burst == nicvf_recv_pkts ||
+ dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
+ return ptypes;
+
+ return NULL;
+}
+
+static void
+nicvf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ int i;
+ uint16_t rxqs = 0, txqs = 0;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rxqs |= (0x3 << (i * 2));
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ txqs |= (0x3 << (i * 2));
+
+ nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
+}
+
+/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
+static void
+nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
+{
+}
+
+static inline uint64_t
+nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
+{
+ uint64_t nic_rss = 0;
+
+ if (ethdev_rss & ETH_RSS_IPV4)
+ nic_rss |= RSS_IP_ENA;
+
+ if (ethdev_rss & ETH_RSS_IPV6)
+ nic_rss |= RSS_IP_ENA;
+
+ if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
+ nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
+
+ if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
+ nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
+
+ if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
+ nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
+
+ if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
+ nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
+
+ if (ethdev_rss & ETH_RSS_PORT)
+ nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
+
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+ if (ethdev_rss & ETH_RSS_VXLAN)
+ nic_rss |= RSS_TUN_VXLAN_ENA;
+
+ if (ethdev_rss & ETH_RSS_GENEVE)
+ nic_rss |= RSS_TUN_GENEVE_ENA;
+
+ if (ethdev_rss & ETH_RSS_NVGRE)
+ nic_rss |= RSS_TUN_NVGRE_ENA;
+ }
+
+ return nic_rss;
+}
+
+static inline uint64_t
+nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss)
+{
+ uint64_t ethdev_rss = 0;
+
+ if (nic_rss & RSS_IP_ENA)
+ ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
+
+ if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
+ ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_NONFRAG_IPV6_TCP);
+
+ if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
+ ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_NONFRAG_IPV6_UDP);
+
+ if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
+ ethdev_rss |= ETH_RSS_PORT;
+
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+ if (nic_rss & RSS_TUN_VXLAN_ENA)
+ ethdev_rss |= ETH_RSS_VXLAN;
+
+ if (nic_rss & RSS_TUN_GENEVE_ENA)
+ ethdev_rss |= ETH_RSS_GENEVE;
+
+ if (nic_rss & RSS_TUN_NVGRE_ENA)
+ ethdev_rss |= ETH_RSS_NVGRE;
+ }
+ return ethdev_rss;
+}
+
+static int
+nicvf_dev_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ int ret, i, j;
+
+ if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
+		RTE_LOG(ERR, PMD, "The size of the configured hash lookup "
+			"table (%d) doesn't match the number of entries the "
+			"hardware supports (%d)",
+			reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+ return -EINVAL;
+ }
+
+ ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+ if (ret)
+ return ret;
+
+ /* Copy RETA table */
+ for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+				reta_conf[i].reta[j] =
+					tbl[(i * RTE_RETA_GROUP_SIZE) + j];
+ }
+
+ return 0;
+}
+
+static int
+nicvf_dev_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ int ret, i, j;
+
+ if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
+		RTE_LOG(ERR, PMD, "The size of the configured hash lookup "
+			"table (%d) doesn't match the number of entries the "
+			"hardware supports (%d)",
+			reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
+ return -EINVAL;
+ }
+
+ ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+ if (ret)
+ return ret;
+
+ /* Copy RETA table */
+ for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ tbl[(i * RTE_RETA_GROUP_SIZE) + j] = reta_conf[i].reta[j];
+ }
+
+ return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+static int
+nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (rss_conf->rss_key)
+ nicvf_rss_get_key(nic, rss_conf->rss_key);
+
+ rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE;
+ rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
+ return 0;
+}
+
+static int
+nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t nic_rss;
+
+ if (rss_conf->rss_key &&
+ rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
+ RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
+ rss_conf->rss_key_len);
+ return -EINVAL;
+ }
+
+ if (rss_conf->rss_key)
+ nicvf_rss_set_key(nic, rss_conf->rss_key);
+
+ nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
+ nicvf_rss_set_cfg(nic, nic_rss);
+ return 0;
+}
+
+static int
+nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
+ uint32_t desc_cnt)
+{
+ const struct rte_memzone *rz;
+ uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t);
+
+ rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
+ NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
+ if (rz == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
+ return -ENOMEM;
+ }
+
+ memset(rz->addr, 0, ring_size);
+
+ rxq->phys = rz->phys_addr;
+ rxq->desc = rz->addr;
+ rxq->qlen_mask = desc_cnt - 1;
+
+ return 0;
+}
+
+static int
+nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
+ uint32_t desc_cnt)
+{
+ const struct rte_memzone *rz;
+ uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t);
+
+ rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
+ NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
+ if (rz == NULL) {
+ PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring");
+ return -ENOMEM;
+ }
+
+ memset(rz->addr, 0, ring_size);
+
+ sq->phys = rz->phys_addr;
+ sq->desc = rz->addr;
+ sq->qlen_mask = desc_cnt - 1;
+
+ return 0;
+}
+
+static int
+nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz)
+{
+ struct nicvf_rbdr *rbdr;
+ const struct rte_memzone *rz;
+ uint32_t ring_size;
+
+ assert(nic->rbdr == NULL);
+ rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (rbdr == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
+ return -ENOMEM;
+ }
+
+ ring_size = sizeof(struct rbdr_entry_t) * desc_cnt;
+ rz = rte_eth_dma_zone_reserve(nic->eth_dev, "rbdr", 0, ring_size,
+ NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
+ if (rz == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
+ return -ENOMEM;
+ }
+
+ memset(rz->addr, 0, ring_size);
+
+ rbdr->phys = rz->phys_addr;
+ rbdr->tail = 0;
+ rbdr->next_tail = 0;
+ rbdr->desc = rz->addr;
+ rbdr->buffsz = buffsz;
+ rbdr->qlen_mask = desc_cnt - 1;
+ rbdr->rbdr_status =
+ nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
+ rbdr->rbdr_door =
+ nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
+
+ nic->rbdr = rbdr;
+ return 0;
+}
+
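+/*
+ * Return one RBDR buffer to its mempool: pick the first rx queue that
+ * still has precharged buffers outstanding, convert the physical address
+ * back to the mbuf pointer and put it back into that queue's pool.
+ */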
+static void
+nicvf_rbdr_release_mbuf(struct nicvf *nic, nicvf_phys_addr_t phy)
+{
+ uint16_t qidx;
+ void *obj;
+ struct nicvf_rxq *rxq;
+
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+ rxq = nic->eth_dev->data->rx_queues[qidx];
+ if (rxq->precharge_cnt) {
+ obj = (void *)nicvf_mbuff_phy2virt(phy,
+ rxq->mbuf_phys_off);
+ rte_mempool_put(rxq->pool, obj);
+ rxq->precharge_cnt--;
+ break;
+ }
+ }
+}
+
+static inline void
+nicvf_rbdr_release_mbufs(struct nicvf *nic)
+{
+ uint32_t qlen_mask, head;
+ struct rbdr_entry_t *entry;
+ struct nicvf_rbdr *rbdr = nic->rbdr;
+
+ qlen_mask = rbdr->qlen_mask;
+ head = rbdr->head;
+ while (head != rbdr->tail) {
+ entry = rbdr->desc + head;
+ nicvf_rbdr_release_mbuf(nic, entry->full_addr);
+ head++;
+ head = head & qlen_mask;
+ }
+}
+
+static inline void
+nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
+{
+ uint32_t head;
+
+ head = txq->head;
+ while (head != txq->tail) {
+ if (txq->txbuffs[head]) {
+ rte_pktmbuf_free_seg(txq->txbuffs[head]);
+ txq->txbuffs[head] = NULL;
+ }
+ head++;
+ head = head & txq->qlen_mask;
+ }
+}
+
+static void
+nicvf_tx_queue_reset(struct nicvf_txq *txq)
+{
+ uint32_t txq_desc_cnt = txq->qlen_mask + 1;
+
+ memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
+ memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
+ txq->tail = 0;
+ txq->head = 0;
+ txq->xmit_bufs = 0;
+}
+
+static inline int
+nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf_txq *txq;
+ int ret;
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ txq = dev->data->tx_queues[qidx];
+ txq->pool = NULL;
+ ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret);
+ goto config_sq_error;
+ }
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return ret;
+
+config_sq_error:
+ nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+ return ret;
+}
+
+static inline int
+nicvf_stop_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf_txq *txq;
+ int ret;
+
+ if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ ret = nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim sq %d %d", qidx, ret);
+
+ txq = dev->data->tx_queues[qidx];
+ nicvf_tx_queue_release_mbufs(txq);
+ nicvf_tx_queue_reset(txq);
+
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return ret;
+}
+
+static inline int
+nicvf_configure_cpi(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint16_t qidx, qcnt;
+ int ret;
+
+ /* Count started rx queues */
+ for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
+ if (dev->data->rx_queue_state[qidx] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
+ qcnt++;
+
+ nic->cpi_alg = CPI_ALG_NONE;
+ ret = nicvf_mbox_config_cpi(nic, qcnt);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
+
+ return ret;
+}
+
+static inline int
+nicvf_configure_rss(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint64_t rsshf;
+ int ret = -EINVAL;
+
+ rsshf = nicvf_rss_ethdev_to_nic(nic,
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
+ PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
+ dev->data->dev_conf.rxmode.mq_mode,
+ nic->eth_dev->data->nb_rx_queues,
+ nic->eth_dev->data->dev_conf.lpbk_mode, rsshf);
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ ret = nicvf_rss_term(nic);
+ else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ ret = nicvf_rss_config(nic,
+ nic->eth_dev->data->nb_rx_queues, rsshf);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
+
+ return ret;
+}
+
+static int
+nicvf_configure_rss_reta(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ unsigned int idx, qmap_size;
+ uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
+ uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
+
+ if (nic->cpi_alg != CPI_ALG_NONE)
+ return -EINVAL;
+
+ /* Prepare queue map */
+ for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
+ if (dev->data->rx_queue_state[idx] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
+ qmap[qmap_size++] = idx;
+ }
+
+ /* All rx queues stopped: nothing to map into the RETA */
+ if (qmap_size == 0)
+ return 0;
+
+ /* Update default RSS RETA */
+ for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
+ default_reta[idx] = qmap[idx % qmap_size];
+
+ return nicvf_rss_reta_update(nic, default_reta,
+ NIC_MAX_RSS_IDR_TBL_SIZE);
+}
+
+static void
+nicvf_dev_tx_queue_release(void *sq)
+{
+ struct nicvf_txq *txq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = (struct nicvf_txq *)sq;
+ if (txq) {
+ if (txq->txbuffs != NULL) {
+ nicvf_tx_queue_release_mbufs(txq);
+ rte_free(txq->txbuffs);
+ txq->txbuffs = NULL;
+ }
+ rte_free(txq);
+ }
+}
+
+static void
+nicvf_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct nicvf_txq *txq;
+ size_t i;
+ bool multiseg = false;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
+ multiseg = true;
+ break;
+ }
+ }
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (multiseg) {
+ PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
+ dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
+ dev->tx_pkt_burst = nicvf_xmit_pkts;
+ }
+
+ if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
+ PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
+ else
+ PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
+}
+
+static void
+nicvf_set_rx_function(struct rte_eth_dev *dev)
+{
+ if (dev->data->scattered_rx) {
+ PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
+ dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
+ dev->rx_pkt_burst = nicvf_recv_pkts;
+ }
+}
+
+static int
+nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ uint16_t tx_free_thresh;
+ uint8_t is_single_pool;
+ struct nicvf_txq *txq;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
+ PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Tx deferred start is not supported */
+ if (tx_conf->tx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Tx deferred start not supported");
+ return -EINVAL;
+ }
+
+ /* Round up nb_desc to available qsize and validate max number of desc */
+ nb_desc = nicvf_qsize_sq_roundup(nb_desc);
+ if (nb_desc == 0) {
+ PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
+ return -EINVAL;
+ }
+
+ /* Validate tx_free_thresh */
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh :
+ NICVF_DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh > (nb_desc) ||
+ tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
+ PMD_INIT_LOG(ERR,
+ "tx_free_thresh must be less than the number of TX "
+ "descriptors. (tx_free_thresh=%u port=%d "
+ "queue=%d)", (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)qidx);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->tx_queues[qidx] != NULL) {
+ PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+ qidx);
+ nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
+ dev->data->tx_queues[qidx] = NULL;
+ }
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
+ return -ENOMEM;
+ }
+
+ txq->nic = nic;
+ txq->queue_id = qidx;
+ txq->tx_free_thresh = tx_free_thresh;
+ txq->txq_flags = tx_conf->txq_flags;
+ txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
+ txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
+ is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
+ txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
+
+ /* Choose optimum free threshold value for multipool case */
+ if (!is_single_pool) {
+ txq->tx_free_thresh = (uint16_t)
+ (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
+ NICVF_TX_FREE_MPOOL_THRESH :
+ tx_conf->tx_free_thresh);
+ txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
+ } else {
+ txq->pool_free = nicvf_single_pool_free_xmited_buffers;
+ }
+
+ /* Allocate software ring */
+ txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
+ nb_desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, nic->node);
+
+ if (txq->txbuffs == NULL) {
+ nicvf_dev_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
+ nicvf_dev_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ nicvf_tx_queue_reset(txq);
+
+ PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
+ qidx, txq, nb_desc, txq->desc, txq->phys);
+
+ dev->data->tx_queues[qidx] = txq;
+ dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static inline void
+nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
+{
+ uint32_t rxq_cnt;
+ uint32_t nb_pkts, released_pkts = 0;
+ uint32_t refill_cnt = 0;
+ struct rte_eth_dev *dev = rxq->nic->eth_dev;
+ struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
+
+ if (dev->rx_pkt_burst == NULL)
+ return;
+
+ while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
+ nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
+ NICVF_MAX_RX_FREE_THRESH);
+ PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
+ while (nb_pkts) {
+ rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
+ released_pkts++;
+ }
+ }
+
+ refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
+ PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
+ released_pkts, refill_cnt);
+}
+
+static void
+nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
+{
+ rxq->head = 0;
+ rxq->available_space = 0;
+ rxq->recv_buffers = 0;
+}
+
+static inline int
+nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct nicvf_rxq *rxq;
+ int ret;
+
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ /* Point this rxq at the shared RBDR */
+ rxq = dev->data->rx_queues[qidx];
+ rxq->shared_rbdr = nic->rbdr;
+
+ ret = nicvf_qset_rq_config(nic, qidx, rxq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
+ goto config_rq_error;
+ }
+ ret = nicvf_qset_cq_config(nic, qidx, rxq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
+ goto config_cq_error;
+ }
+
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+
+config_cq_error:
+ nicvf_qset_cq_reclaim(nic, qidx);
+config_rq_error:
+ nicvf_qset_rq_reclaim(nic, qidx);
+ return ret;
+}
+
+static inline int
+nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct nicvf_rxq *rxq;
+ int ret, other_error;
+
+ if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ ret = nicvf_qset_rq_reclaim(nic, qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret);
+
+ other_error = ret;
+ rxq = dev->data->rx_queues[qidx];
+ nicvf_rx_queue_release_mbufs(rxq);
+ nicvf_rx_queue_reset(rxq);
+
+ ret = nicvf_qset_cq_reclaim(nic, qidx);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret);
+
+ other_error |= ret;
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return other_error;
+}
+
+static void
+nicvf_dev_rx_queue_release(void *rx_queue)
+{
+ struct nicvf_rxq *rxq = rx_queue;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rxq)
+ rte_free(rxq);
+}
+
+static int
+nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ int ret;
+
+ ret = nicvf_start_rx_queue(dev, qidx);
+ if (ret)
+ return ret;
+
+ ret = nicvf_configure_cpi(dev);
+ if (ret)
+ return ret;
+
+ return nicvf_configure_rss_reta(dev);
+}
+
+static int
+nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ int ret;
+
+ ret = nicvf_stop_rx_queue(dev, qidx);
+ ret |= nicvf_configure_cpi(dev);
+ ret |= nicvf_configure_rss_reta(dev);
+ return ret;
+}
+
+static int
+nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ return nicvf_start_tx_queue(dev, qidx);
+}
+
+static int
+nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
+{
+ return nicvf_stop_tx_queue(dev, qidx);
+}
+
+static int
+nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ uint16_t rx_free_thresh;
+ struct nicvf_rxq *rxq;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Socket id check */
+ if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
+ PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
+ socket_id, nic->node);
+
+ /* Mempool memory should be contiguous */
+ if (mp->nb_mem_chunks != 1) {
+ PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
+ return -EINVAL;
+ }
+
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ PMD_INIT_LOG(ERR, "Rx deferred start not supported");
+ return -EINVAL;
+ }
+
+ /* Round up nb_desc to available qsize and validate max number of desc */
+ nb_desc = nicvf_qsize_cq_roundup(nb_desc);
+ if (nb_desc == 0) {
+ PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
+ return -EINVAL;
+ }
+
+ /* Check rx_free_thresh upper bound */
+ rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
+ rx_conf->rx_free_thresh :
+ NICVF_DEFAULT_RX_FREE_THRESH);
+ if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
+ rx_free_thresh >= nb_desc * .75) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
+ rx_free_thresh);
+ return -EINVAL;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->rx_queues[qidx] != NULL) {
+ PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+ qidx);
+ nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
+ dev->data->rx_queues[qidx] = NULL;
+ }
+
+ /* Allocate rxq memory */
+ rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
+ RTE_CACHE_LINE_SIZE, nic->node);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
+ return -ENOMEM;
+ }
+
+ rxq->nic = nic;
+ rxq->pool = mp;
+ rxq->queue_id = qidx;
+ rxq->port_id = dev->data->port_id;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->rx_drop_en = rx_conf->rx_drop_en;
+ rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
+ rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
+ rxq->precharge_cnt = 0;
+ rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+
+ /* Alloc completion queue */
+ if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
+ PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
+ nicvf_dev_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+
+ nicvf_rx_queue_reset(rxq);
+
+ PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
+ qidx, rxq, mp->name, nb_desc,
+ rte_mempool_avail_count(mp), rxq->phys);
+
+ dev->data->rx_queues[qidx] = rxq;
+ dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static void
+nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->min_rx_bufsize = ETHER_MIN_MTU;
+ dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
+ dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
+ dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_vfs = dev->pci_dev->max_vfs;
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+ dev_info->reta_size = nic->rss_info.rss_size;
+ dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
+ dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
+ dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
+ .txq_flags =
+ ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOREFCOUNT |
+ ETH_TXQ_FLAGS_NOMULTMEMP |
+ ETH_TXQ_FLAGS_NOVLANOFFL |
+ ETH_TXQ_FLAGS_NOXSUMSCTP,
+ };
+}
+
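+/*
+ * RBDR precharge callback: allocate one buffer from the first rx queue
+ * pool that has not yet reached its precharge cap and return the
+ * physical address of its data area for the RBDR entry.
+ */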
+static nicvf_phys_addr_t
+rbdr_rte_mempool_get(void *opaque)
+{
+ uint16_t qidx;
+ uintptr_t mbuf;
+ struct nicvf_rxq *rxq;
+ struct nicvf *nic = nicvf_pmd_priv((struct rte_eth_dev *)opaque);
+
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+ rxq = nic->eth_dev->data->rx_queues[qidx];
+ /* Maintain equal buffer count across all pools */
+ if (rxq->precharge_cnt >= rxq->qlen_mask)
+ continue;
+ rxq->precharge_cnt++;
+ mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
+ if (mbuf)
+ return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
+ }
+ return 0;
+}
+
+static int
+nicvf_dev_start(struct rte_eth_dev *dev)
+{
+ int ret;
+ uint16_t qidx;
+ uint32_t buffsz = 0, rbdrsz = 0;
+ uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
+ uint64_t mbuf_phys_off = 0;
+ struct nicvf_rxq *rxq;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_mbuf *mbuf;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ uint16_t mtu;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Userspace process exited without proper shutdown in last run */
+ if (nicvf_qset_rbdr_active(nic, 0))
+ nicvf_dev_stop(dev);
+
+ /*
+ * The ThunderX nicvf PMD can support more than one pool per port only
+ * when
+ * 1) the data payload size is the same across all pools on the port,
+ * AND
+ * 2) all mbufs in the pools come from the same hugepage,
+ * AND
+ * 3) the mbuf metadata size is the same across all pools on the port.
+ *
+ * This supports existing applications that use multiple pools per
+ * port; using multiple pools for QoS purposes is not addressed here.
+ */
+
+ /* Validate RBDR buff size */
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ if (buffsz % 128) {
+ PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128");
+ return -EINVAL;
+ }
+ if (rbdrsz == 0)
+ rbdrsz = buffsz;
+ if (rbdrsz != buffsz) {
+ PMD_INIT_LOG(ERR, "buffsz not same, qid=%d (%d/%d)",
+ qidx, rbdrsz, buffsz);
+ return -EINVAL;
+ }
+ }
+
+ /* Validate mempool attributes */
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
+ mbuf = rte_pktmbuf_alloc(rxq->pool);
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "Failed allocate mbuf qid=%d pool=%s",
+ qidx, rxq->pool->name);
+ return -ENOMEM;
+ }
+ rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
+ rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
+ rte_pktmbuf_free(mbuf);
+
+ if (mbuf_phys_off == 0)
+ mbuf_phys_off = rxq->mbuf_phys_off;
+ if (mbuf_phys_off != rxq->mbuf_phys_off) {
+ PMD_INIT_LOG(ERR, "pool params not same,%s %" PRIx64,
+ rxq->pool->name, mbuf_phys_off);
+ return -EINVAL;
+ }
+ }
+
+ /* Check the level of buffers in the pool */
+ total_rxq_desc = 0;
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ /* Count total numbers of rxq descs */
+ total_rxq_desc += rxq->qlen_mask + 1;
+ exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
+ exp_buffs *= nic->eth_dev->data->nb_rx_queues;
+ if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
+ PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
+ rxq->pool->name,
+ rte_mempool_avail_count(rxq->pool),
+ exp_buffs);
+ return -ENOENT;
+ }
+ }
+
+ /* Check RBDR desc overflow */
+ ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
+ if (ret == 0) {
+ PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc");
+ return -ENOMEM;
+ }
+
+ /* Enable qset */
+ ret = nicvf_qset_config(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to enable qset %d", ret);
+ return ret;
+ }
+
+ /* Allocate RBDR and RBDR ring desc */
+ nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
+ ret = nicvf_qset_rbdr_alloc(nic, nb_rbdr_desc, rbdrsz);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc");
+ goto qset_reclaim;
+ }
+
+ /* Enable and configure RBDR registers */
+ ret = nicvf_qset_rbdr_config(nic, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure rbdr %d", ret);
+ goto qset_rbdr_free;
+ }
+
+ /* Fill rte_mempool buffers in RBDR pool and precharge it */
+ ret = nicvf_qset_rbdr_precharge(nic, 0, rbdr_rte_mempool_get,
+ dev, total_rxq_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret);
+ goto qset_rbdr_reclaim;
+ }
+
+ PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR",
+ nic->rbdr->tail, nb_rbdr_desc);
+
+ /* Configure RX queues */
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+ ret = nicvf_start_rx_queue(dev, qidx);
+ if (ret)
+ goto start_rxq_error;
+ }
+
+ /* Configure VLAN Strip */
+ nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+
+ /* Configure TX queues */
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++) {
+ ret = nicvf_start_tx_queue(dev, qidx);
+ if (ret)
+ goto start_txq_error;
+ }
+
+ /* Configure CPI algorithm */
+ ret = nicvf_configure_cpi(dev);
+ if (ret)
+ goto start_txq_error;
+
+ /* Configure RSS */
+ ret = nicvf_configure_rss(dev);
+ if (ret)
+ goto qset_rss_error;
+
+ /* Configure loopback */
+ ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
+ goto qset_rss_error;
+ }
+
+ /* Reset all statistics counters attached to this port */
+ ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
+ goto qset_rss_error;
+ }
+
+ /* Setup scatter mode if needed by jumbo */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * VLAN_TAG_SIZE > buffsz)
+ dev->data->scattered_rx = 1;
+ if (rx_conf->enable_scatter)
+ dev->data->scattered_rx = 1;
+
+ /* Setup MTU based on max_rx_pkt_len or default */
+ mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
+ dev->data->dev_conf.rxmode.max_rx_pkt_len
+ - ETHER_HDR_LEN - ETHER_CRC_LEN
+ : ETHER_MTU;
+
+ if (nicvf_dev_set_mtu(dev, mtu)) {
+ PMD_INIT_LOG(ERR, "Failed to set default mtu size");
+ return -EBUSY;
+ }
+
+ /* Configure callbacks based on scatter mode */
+ nicvf_set_tx_function(dev);
+ nicvf_set_rx_function(dev);
+
+ /* Done; let the PF switch the BGX RX and TX blocks to the ON position */
+ nicvf_mbox_cfg_done(nic);
+ return 0;
+
+qset_rss_error:
+ nicvf_rss_term(nic);
+start_txq_error:
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++)
+ nicvf_stop_tx_queue(dev, qidx);
+start_rxq_error:
+ for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
+ nicvf_stop_rx_queue(dev, qidx);
+qset_rbdr_reclaim:
+ nicvf_qset_rbdr_reclaim(nic, 0);
+ nicvf_rbdr_release_mbufs(nic);
+qset_rbdr_free:
+ if (nic->rbdr) {
+ rte_free(nic->rbdr);
+ nic->rbdr = NULL;
+ }
+qset_reclaim:
+ nicvf_qset_reclaim(nic);
+ return ret;
+}
+
+static void
+nicvf_dev_stop(struct rte_eth_dev *dev)
+{
+ int ret;
+ uint16_t qidx;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Let the PF switch the BGX RX and TX blocks to the OFF position */
+ nicvf_mbox_shutdown(nic);
+
+ /* Disable loopback */
+ ret = nicvf_loopback_config(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
+
+ /* Disable VLAN Strip */
+ nicvf_vlan_hw_strip(nic, 0);
+
+ /* Reclaim sq */
+ for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++)
+ nicvf_stop_tx_queue(dev, qidx);
+
+ /* Reclaim rq */
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++)
+ nicvf_stop_rx_queue(dev, qidx);
+
+ /* Reclaim RBDR */
+ ret = nicvf_qset_rbdr_reclaim(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
+
+ /* Move all charged buffers in RBDR back to pool */
+ if (nic->rbdr != NULL)
+ nicvf_rbdr_release_mbufs(nic);
+
+ /* Reclaim CPI configuration */
+ if (!nic->sqs_mode) {
+ ret = nicvf_mbox_config_cpi(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim CPI config");
+ }
+
+ /* Disable qset */
+ ret = nicvf_qset_config(nic);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
+
+ /* Disable all interrupts */
+ nicvf_disable_all_interrupts(nic);
+
+ /* Free RBDR SW structure */
+ if (nic->rbdr) {
+ rte_free(nic->rbdr);
+ nic->rbdr = NULL;
+ }
+}
+
+static void
+nicvf_dev_close(struct rte_eth_dev *dev)
+{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ nicvf_dev_stop(dev);
+ nicvf_periodic_alarm_stop(nic);
+}
+
+static int
+nicvf_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ struct rte_eth_txmode *txmode = &conf->txmode;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!rte_eal_has_hugepages()) {
+ PMD_INIT_LOG(INFO, "Huge page is not configured");
+ return -EINVAL;
+ }
+
+ if (txmode->mq_mode) {
+ PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
+ return -EINVAL;
+ }
+
+ if (!rxmode->hw_strip_crc) {
+ PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
+ rxmode->hw_strip_crc = 1;
+ }
+
+ if (rxmode->hw_ip_checksum) {
+ PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
+ rxmode->hw_ip_checksum = 0;
+ }
+
+ if (rxmode->split_hdr_size) {
+ PMD_INIT_LOG(INFO, "Rxmode does not support split header");
+ return -EINVAL;
+ }
+
+ if (rxmode->hw_vlan_filter) {
+ PMD_INIT_LOG(INFO, "VLAN filter not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->hw_vlan_extend) {
+ PMD_INIT_LOG(INFO, "VLAN extended not supported");
+ return -EINVAL;
+ }
+
+ if (rxmode->enable_lro) {
+ PMD_INIT_LOG(INFO, "LRO not supported");
+ return -EINVAL;
+ }
+
+ if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
+ return -EINVAL;
+ }
+
+ if (conf->dcb_capability_en) {
+ PMD_INIT_LOG(INFO, "DCB enable not supported");
+ return -EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ PMD_INIT_LOG(INFO, "Flow director not supported");
+ return -EINVAL;
+ }
+
+ PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
+ dev->data->port_id, nicvf_hw_cap(nic));
+
+ return 0;
+}
+
+/* Initialize and register driver with DPDK Application */
+static const struct eth_dev_ops nicvf_eth_dev_ops = {
+ .dev_configure = nicvf_dev_configure,
+ .dev_start = nicvf_dev_start,
+ .dev_stop = nicvf_dev_stop,
+ .link_update = nicvf_dev_link_update,
+ .dev_close = nicvf_dev_close,
+ .stats_get = nicvf_dev_stats_get,
+ .stats_reset = nicvf_dev_stats_reset,
+ .promiscuous_enable = nicvf_dev_promisc_enable,
+ .dev_infos_get = nicvf_dev_info_get,
+ .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
+ .mtu_set = nicvf_dev_set_mtu,
+ .reta_update = nicvf_dev_reta_update,
+ .reta_query = nicvf_dev_reta_query,
+ .rss_hash_update = nicvf_dev_rss_hash_update,
+ .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get,
+ .rx_queue_start = nicvf_dev_rx_queue_start,
+ .rx_queue_stop = nicvf_dev_rx_queue_stop,
+ .tx_queue_start = nicvf_dev_tx_queue_start,
+ .tx_queue_stop = nicvf_dev_tx_queue_stop,
+ .rx_queue_setup = nicvf_dev_rx_queue_setup,
+ .rx_queue_release = nicvf_dev_rx_queue_release,
+ .rx_queue_count = nicvf_dev_rx_queue_count,
+ .tx_queue_setup = nicvf_dev_tx_queue_setup,
+ .tx_queue_release = nicvf_dev_tx_queue_release,
+ .get_reg_length = nicvf_dev_get_reg_length,
+ .get_reg = nicvf_dev_get_regs,
+};
+
+static int
+nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ int ret;
+ struct rte_pci_device *pci_dev;
+ struct nicvf *nic = nicvf_pmd_priv(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->dev_ops = &nicvf_eth_dev_ops;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /* Setup callbacks for secondary process */
+ nicvf_set_tx_function(eth_dev);
+ nicvf_set_rx_function(eth_dev);
+ return 0;
+ }
+
+ pci_dev = eth_dev->pci_dev;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ nic->device_id = pci_dev->id.device_id;
+ nic->vendor_id = pci_dev->id.vendor_id;
+ nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ nic->eth_dev = eth_dev;
+
+ PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
+ pci_dev->id.vendor_id, pci_dev->id.device_id,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
+ if (!nic->reg_base) {
+ PMD_INIT_LOG(ERR, "Failed to map BAR0");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ nicvf_disable_all_interrupts(nic);
+
+ ret = nicvf_periodic_alarm_start(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to start period alarm");
+ goto fail;
+ }
+
+ ret = nicvf_mbox_check_pf_ready(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
+ goto alarm_fail;
+ } else {
+ PMD_INIT_LOG(INFO,
+ "node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
+ nic->node, nic->vf_id,
+ nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
+ nic->sqs_mode ? "true" : "false",
+ nic->loopback_supported ? "true" : "false"
+ );
+ }
+
+ if (nic->sqs_mode) {
+ PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching...");
+ /* Detach port by returning a positive error number */
+ ret = ENOTSUP;
+ goto alarm_fail;
+ }
+
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
+ ret = -ENOMEM;
+ goto alarm_fail;
+ }
+ if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
+ eth_random_addr(&nic->mac_addr[0]);
+
+ ether_addr_copy((struct ether_addr *)nic->mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to set mac addr");
+ goto malloc_fail;
+ }
+
+ ret = nicvf_base_init(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
+ goto malloc_fail;
+ }
+
+ ret = nicvf_mbox_get_rss_size(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get rss table size");
+ goto malloc_fail;
+ }
+
+ PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
+ eth_dev->data->port_id, nic->vendor_id, nic->device_id,
+ nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
+ nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
+
+ return 0;
+
+malloc_fail:
+ rte_free(eth_dev->data->mac_addrs);
+alarm_fail:
+ nicvf_periodic_alarm_stop(nic);
+fail:
+ return ret;
+}
+
+static const struct rte_pci_id pci_id_nicvf_map[] = {
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF,
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct eth_driver rte_nicvf_pmd = {
+ .pci_drv = {
+ .name = "rte_nicvf_pmd",
+ .id_table = pci_id_nicvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ },
+ .eth_dev_init = nicvf_eth_dev_init,
+ .dev_private_size = sizeof(struct nicvf),
+};
+
+static int
+rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s",
+ THUNDERX_NICVF_PMD_VERSION);
+
+ rte_eth_driver_register(&rte_nicvf_pmd);
+ return 0;
+}
+
+static struct rte_driver rte_nicvf_driver = {
+ .name = "nicvf_driver",
+ .type = PMD_PDEV,
+ .init = rte_nicvf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_nicvf_driver);
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
new file mode 100644
index 00000000..59fa19cf
--- /dev/null
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -0,0 +1,106 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __THUNDERX_NICVF_ETHDEV_H__
+#define __THUNDERX_NICVF_ETHDEV_H__
+
+#include <rte_ethdev.h>
+
+#define THUNDERX_NICVF_PMD_VERSION "1.0"
+
+#define NICVF_INTR_POLL_INTERVAL_MS 50
+#define NICVF_HALF_DUPLEX 0x00
+#define NICVF_FULL_DUPLEX 0x01
+#define NICVF_UNKNOWN_DUPLEX 0xff
+
+#define NICVF_RSS_OFFLOAD_PASS1 ( \
+ ETH_RSS_PORT | \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define NICVF_RSS_OFFLOAD_TUNNEL ( \
+ ETH_RSS_VXLAN | \
+ ETH_RSS_GENEVE | \
+ ETH_RSS_NVGRE)
+
+#define NICVF_DEFAULT_RX_FREE_THRESH 224
+#define NICVF_DEFAULT_TX_FREE_THRESH 224
+#define NICVF_TX_FREE_MPOOL_THRESH 16
+#define NICVF_MAX_RX_FREE_THRESH 1024
+#define NICVF_MAX_TX_FREE_THRESH 1024
+
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag */
+
+static inline struct nicvf *
+nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
+{
+ return eth_dev->data->dev_private;
+}
+
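+/*
+ * Virtual-to-physical delta of the pool's single memory chunk (the PMD
+ * requires nb_mem_chunks == 1, so the first memhdr covers every mbuf).
+ */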
+static inline uint64_t
+nicvf_mempool_phy_offset(struct rte_mempool *mp)
+{
+ struct rte_mempool_memhdr *hdr;
+
+ hdr = STAILQ_FIRST(&mp->mem_list);
+ assert(hdr != NULL);
+ return (uint64_t)((uintptr_t)hdr->addr - hdr->phys_addr);
+}
+
+static inline uint16_t
+nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
+{
+ return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
+}
+
+/*
+ * Simple phy2virt functions assuming mbufs are in a single huge page
+ * V = P + offset
+ * P = V - offset
+ */
+static inline uintptr_t
+nicvf_mbuff_phy2virt(phys_addr_t phy, uint64_t mbuf_phys_off)
+{
+ return (uintptr_t)(phy + mbuf_phys_off);
+}
+
+static inline uintptr_t
+nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
+{
+ return (phys_addr_t)(virt - mbuf_phys_off);
+}
+
+#endif /* __THUNDERX_NICVF_ETHDEV_H__ */
diff --git a/drivers/net/thunderx/nicvf_logs.h b/drivers/net/thunderx/nicvf_logs.h
new file mode 100644
index 00000000..0667d468
--- /dev/null
+++ b/drivers/net/thunderx/nicvf_logs.h
@@ -0,0 +1,83 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __THUNDERX_NICVF_LOGS__
+#define __THUNDERX_NICVF_LOGS__
+
+#include <assert.h>
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define NICVF_RX_ASSERT(x) assert(x)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#define NICVF_RX_ASSERT(x) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define NICVF_TX_ASSERT(x) assert(x)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#define NICVF_TX_ASSERT(x) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#define PMD_DRV_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX
+#define PMD_MBOX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_MBOX_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0)
+#define PMD_MBOX_FUNC_TRACE() do { } while (0)
+#endif
+
+#endif /* __THUNDERX_NICVF_LOGS__ */
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
new file mode 100644
index 00000000..eb51a72c
--- /dev/null
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -0,0 +1,599 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_log.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+
+#include "base/nicvf_plat.h"
+
+#include "nicvf_ethdev.h"
+#include "nicvf_rxtx.h"
+#include "nicvf_logs.h"
+
+static inline void __hot
+fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
+{
+ /* Build the header in a local variable to avoid reads from sq desc memory */
+ union sq_entry_t sqe;
+ uint64_t ol_flags;
+
+ /* Fill SQ header descriptor */
+ sqe.buff[0] = 0;
+ sqe.hdr.subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Number of sub-descriptors following this one */
+ sqe.hdr.subdesc_cnt = pkt->nb_segs;
+ sqe.hdr.tot_len = pkt->pkt_len;
+
+ ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK;
+ if (unlikely(ol_flags)) {
+ /* L4 cksum */
+ if (ol_flags & PKT_TX_TCP_CKSUM)
+ sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP;
+ else if (ol_flags & PKT_TX_UDP_CKSUM)
+ sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP;
+ else
+ sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE;
+ sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len;
+
+ /* L3 cksum */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ sqe.hdr.csum_l3 = 1;
+ sqe.hdr.l3_offset = pkt->l2_len;
+ }
+ }
+
+ entry->buff[0] = sqe.buff[0];
+}
+
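+/*
+ * Bulk-reclaim transmitted buffers: read the hardware SQ head pointer
+ * (shifted down to a descriptor index, entries being 16 bytes) and put
+ * every buffer between the software head and it back into the single
+ * tx mempool in one rte_mempool_put_bulk() call.
+ */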
+void __hot
+nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
+{
+ int j = 0;
+ uint32_t curr_head;
+ uint32_t head = sq->head;
+ struct rte_mbuf **txbuffs = sq->txbuffs;
+ void *obj_p[NICVF_MAX_TX_FREE_THRESH] __rte_cache_aligned;
+
+ curr_head = nicvf_addr_read(sq->sq_head) >> 4;
+ while (head != curr_head) {
+ if (txbuffs[head])
+ obj_p[j++] = txbuffs[head];
+
+ head = (head + 1) & sq->qlen_mask;
+ }
+
+ rte_mempool_put_bulk(sq->pool, obj_p, j);
+ sq->head = curr_head;
+ sq->xmit_bufs -= j;
+ NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
+}
+
+void __hot
+nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
+{
+ uint32_t n = 0;
+ uint32_t curr_head;
+ uint32_t head = sq->head;
+ struct rte_mbuf **txbuffs = sq->txbuffs;
+
+ curr_head = nicvf_addr_read(sq->sq_head) >> 4;
+ while (head != curr_head) {
+ if (txbuffs[head]) {
+ rte_pktmbuf_free_seg(txbuffs[head]);
+ n++;
+ }
+
+ head = (head + 1) & sq->qlen_mask;
+ }
+
+ sq->head = curr_head;
+ sq->xmit_bufs -= n;
+ NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
+}
+
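+/*
+ * Number of free SQ descriptors; one slot is always kept unused so a
+ * full ring can be distinguished from an empty one, hence the -1.
+ */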
+static inline uint32_t __hot
+nicvf_free_tx_desc(struct nicvf_txq *sq)
+{
+ return ((sq->head - sq->tail - 1) & sq->qlen_mask);
+}
+
+/* Send Header + Packet */
+#define TX_DESC_PER_PKT 2
+
+static inline uint32_t __hot
+nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint32_t free_desc = nicvf_free_tx_desc(sq);
+
+ if (free_desc < nb_pkts * TX_DESC_PER_PKT ||
+ sq->xmit_bufs > sq->tx_free_thresh) {
+ if (unlikely(sq->pool == NULL))
+ sq->pool = tx_pkts[0]->pool;
+
+ sq->pool_free(sq);
+ /* Freed now, recheck the number of free descs */
+ free_desc = nicvf_free_tx_desc(sq);
+ }
+ return free_desc;
+}
+
+uint16_t __hot
+nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+ uint32_t free_desc;
+ uint32_t tail;
+ struct nicvf_txq *sq = tx_queue;
+ union sq_entry_t *desc_ptr = sq->desc;
+ struct rte_mbuf **txbuffs = sq->txbuffs;
+ struct rte_mbuf *pkt;
+ uint32_t qlen_mask = sq->qlen_mask;
+
+ tail = sq->tail;
+ free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts);
+
+ for (i = 0; i < nb_pkts && (int)free_desc >= TX_DESC_PER_PKT; i++) {
+ pkt = tx_pkts[i];
+
+ txbuffs[tail] = NULL;
+ fill_sq_desc_header(desc_ptr + tail, pkt);
+ tail = (tail + 1) & qlen_mask;
+
+ txbuffs[tail] = pkt;
+ fill_sq_desc_gather(desc_ptr + tail, pkt);
+ tail = (tail + 1) & qlen_mask;
+ free_desc -= TX_DESC_PER_PKT;
+ }
+
+ sq->tail = tail;
+ sq->xmit_bufs += i;
+ rte_wmb();
+
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ return i;
+}
+
+uint16_t __hot
+nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, k;
+ uint32_t used_desc, next_used_desc, used_bufs, free_desc, tail;
+ struct nicvf_txq *sq = tx_queue;
+ union sq_entry_t *desc_ptr = sq->desc;
+ struct rte_mbuf **txbuffs = sq->txbuffs;
+ struct rte_mbuf *pkt, *seg;
+ uint32_t qlen_mask = sq->qlen_mask;
+ uint16_t nb_segs;
+
+ tail = sq->tail;
+ used_desc = 0;
+ used_bufs = 0;
+
+ free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts);
+
+ for (i = 0; i < nb_pkts; i++) {
+ pkt = tx_pkts[i];
+
+ nb_segs = pkt->nb_segs;
+
+ next_used_desc = used_desc + nb_segs + 1;
+ if (next_used_desc > free_desc)
+ break;
+ used_desc = next_used_desc;
+ used_bufs += nb_segs;
+
+ txbuffs[tail] = NULL;
+ fill_sq_desc_header(desc_ptr + tail, pkt);
+ tail = (tail + 1) & qlen_mask;
+
+ txbuffs[tail] = pkt;
+ fill_sq_desc_gather(desc_ptr + tail, pkt);
+ tail = (tail + 1) & qlen_mask;
+
+ seg = pkt->next;
+ for (k = 1; k < nb_segs; k++) {
+ txbuffs[tail] = seg;
+ fill_sq_desc_gather(desc_ptr + tail, seg);
+ tail = (tail + 1) & qlen_mask;
+ seg = seg->next;
+ }
+ }
+
+ sq->tail = tail;
+ sq->xmit_bufs += used_bufs;
+ rte_wmb();
+
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, used_desc);
+ return i;
+}
+
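+/*
+ * Map the CQE's (l3_type, l4_type) pair straight to an mbuf packet type;
+ * a single cache-aligned 2D lookup keeps classification branch-free on
+ * the receive path.
+ */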
+static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
+ [L3_NONE][L4_NONE] = RTE_PTYPE_UNKNOWN,
+ [L3_NONE][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [L3_NONE][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [L3_NONE][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [L3_NONE][L4_TCP] = RTE_PTYPE_L4_TCP,
+ [L3_NONE][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
+ [L3_NONE][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [L3_NONE][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
+ [L3_NONE][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_NONE][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_NONE][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_IPV4][L4_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [L3_IPV4][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+ [L3_IPV4][L4_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+ [L3_IPV4][L4_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+ [L3_IPV4][L4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [L3_IPV4][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [L3_IPV4][L4_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+ [L3_IPV4][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [L3_IPV4][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_IPV4][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_IPV4][L4_NVGRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_IPV4_OPT][L4_NONE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [L3_IPV4_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L3_IPV4,
+ [L3_IPV4_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+ [L3_IPV4_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+ [L3_IPV4_OPT][L4_TCP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+ [L3_IPV4_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [L3_IPV4_OPT][L4_GRE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [L3_IPV4_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+ [L3_IPV4_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_IPV4_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_IPV4_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_IPV6][L4_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [L3_IPV6][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+ [L3_IPV6][L4_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+ [L3_IPV6][L4_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+ [L3_IPV6][L4_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [L3_IPV6][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [L3_IPV6][L4_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+ [L3_IPV6][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [L3_IPV6][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_IPV6][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_IPV6][L4_NVGRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_IPV6_OPT][L4_NONE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [L3_IPV6_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6_EXT |
+ RTE_PTYPE_L3_IPV4,
+ [L3_IPV6_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+ [L3_IPV6_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+ [L3_IPV6_OPT][L4_TCP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [L3_IPV6_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [L3_IPV6_OPT][L4_GRE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+ [L3_IPV6_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [L3_IPV6_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6_EXT |
+ RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_IPV6_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6_EXT |
+ RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_IPV6_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV6_EXT |
+ RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_ET_STOP][L4_NONE] = RTE_PTYPE_UNKNOWN,
+ [L3_ET_STOP][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [L3_ET_STOP][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [L3_ET_STOP][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [L3_ET_STOP][L4_TCP] = RTE_PTYPE_L4_TCP,
+ [L3_ET_STOP][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
+ [L3_ET_STOP][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [L3_ET_STOP][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
+ [L3_ET_STOP][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_ET_STOP][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_ET_STOP][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+ [L3_OTHER][L4_NONE] = RTE_PTYPE_UNKNOWN,
+ [L3_OTHER][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+ [L3_OTHER][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
+ [L3_OTHER][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
+ [L3_OTHER][L4_TCP] = RTE_PTYPE_L4_TCP,
+ [L3_OTHER][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
+ [L3_OTHER][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [L3_OTHER][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
+ [L3_OTHER][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [L3_OTHER][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+};
+
+static inline uint32_t __hot
+nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
+{
+ return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
+}
+
+static inline int __hot
+nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
+{
+ int i;
+ uint32_t ltail, next_tail;
+ struct nicvf_rbdr *rbdr = rxq->shared_rbdr;
+ uint64_t mbuf_phys_off = rxq->mbuf_phys_off;
+ struct rbdr_entry_t *desc = rbdr->desc;
+ uint32_t qlen_mask = rbdr->qlen_mask;
+ uintptr_t door = rbdr->rbdr_door;
+ void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned;
+
+ if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) {
+ rxq->nic->eth_dev->data->rx_mbuf_alloc_failed += to_fill;
+ return 0;
+ }
+
+ NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
+ (nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));
+
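+ /*
+ * The RBDR is shared by all rx queues: reserve a contiguous range
+ * of slots with an atomic fetch-add on next_tail, fill the
+ * reserved entries, then wait for earlier reservations to be
+ * published before moving the shared tail and ringing the door.
+ */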
+ next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
+ __ATOMIC_ACQUIRE);
+ ltail = next_tail;
+ for (i = 0; i < to_fill; i++) {
+ struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);
+
+ entry->full_addr = nicvf_mbuff_virt2phy((uintptr_t)obj_p[i],
+ mbuf_phys_off);
+ ltail++;
+ }
+
+ while (__atomic_load_n(&rbdr->tail, __ATOMIC_RELAXED) != next_tail)
+ rte_pause();
+
+ __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
+ nicvf_addr_write(door, to_fill);
+ return to_fill;
+}
+
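+/*
+ * Lazily refresh the count of pending CQ entries from hardware only
+ * when the cached value cannot satisfy the burst; a refreshed count
+ * takes effect from the next call.
+ */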
+static inline int32_t __hot
+nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
+ int32_t available_space)
+{
+ if (unlikely(available_space < nb_pkts))
+ rxq->available_space = nicvf_addr_read(rxq->cq_status)
+ & NICVF_CQ_CQE_COUNT_MASK;
+
+ return RTE_MIN(nb_pkts, available_space);
+}
+
+static inline void __hot
+nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
+ struct rte_mbuf *pkt)
+{
+ if (likely(cqe_rx_w0.rss_alg)) {
+ pkt->hash.rss = cqe_rx_w2.rss_tag;
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ }
+}
+
+uint16_t __hot
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ uint32_t i, to_process;
+ struct cqe_rx_t *cqe_rx;
+ struct rte_mbuf *pkt;
+ cqe_rx_word0_t cqe_rx_w0;
+ cqe_rx_word1_t cqe_rx_w1;
+ cqe_rx_word2_t cqe_rx_w2;
+ cqe_rx_word3_t cqe_rx_w3;
+ struct nicvf_rxq *rxq = rx_queue;
+ union cq_entry_t *desc = rxq->desc;
+ const uint64_t cqe_mask = rxq->qlen_mask;
+ uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
+ uint32_t cqe_head = rxq->head & cqe_mask;
+ int32_t available_space = rxq->available_space;
+ uint8_t port_id = rxq->port_id;
+ const uint8_t rbptr_offset = rxq->rbptr_offset;
+
+ to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
+
+ for (i = 0; i < to_process; i++) {
+ rte_prefetch_non_temporal(&desc[cqe_head + 2]);
+ cqe_rx = (struct cqe_rx_t *)&desc[cqe_head];
+ NICVF_RX_ASSERT(((struct cq_entry_type_t *)cqe_rx)->cqe_type
+ == CQE_TYPE_RX);
+
+ NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx);
+ NICVF_LOAD_PAIR(cqe_rx_w2.u64, cqe_rx_w3.u64, &cqe_rx->word2);
+ rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
+ pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
+ (rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
+
+ pkt->ol_flags = 0;
+ pkt->port = port_id;
+ pkt->data_len = cqe_rx_w3.rb0_sz;
+ pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
+ pkt->nb_segs = 1;
+ pkt->pkt_len = cqe_rx_w3.rb0_sz;
+ pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+
+ nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
+ rte_mbuf_refcnt_set(pkt, 1);
+ rx_pkts[i] = pkt;
+ cqe_head = (cqe_head + 1) & cqe_mask;
+ nicvf_prefetch_store_keep(pkt);
+ }
+
+ if (likely(to_process)) {
+ rxq->available_space -= to_process;
+ rxq->head = cqe_head;
+ nicvf_addr_write(rxq->cq_door, to_process);
+ rxq->recv_buffers += to_process;
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
+ rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
+ }
+ }
+
+ return to_process;
+}
+
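+/* Reassemble one multi-segment packet: the first receive buffer becomes
+ * the mbuf head carrying the packet-level metadata, and the remaining
+ * rb_cnt - 1 buffers are chained behind it as data-only segments.
+ */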
+static inline uint16_t __hot
+nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
+ uint64_t mbuf_phys_off, uint8_t port_id,
+ struct rte_mbuf **rx_pkt, uint8_t rbptr_offset)
+{
+ struct rte_mbuf *pkt, *seg, *prev;
+ cqe_rx_word0_t cqe_rx_w0;
+ cqe_rx_word1_t cqe_rx_w1;
+ cqe_rx_word2_t cqe_rx_w2;
+ uint16_t *rb_sz, nb_segs, seg_idx;
+ uint64_t *rb_ptr;
+
+ NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx);
+ NICVF_RX_ASSERT(cqe_rx_w0.cqe_type == CQE_TYPE_RX);
+ cqe_rx_w2 = cqe_rx->word2;
+ rb_sz = &cqe_rx->word3.rb0_sz;
+ rb_ptr = (uint64_t *)cqe_rx + rbptr_offset;
+ nb_segs = cqe_rx_w0.rb_cnt;
+ pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
+ (rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
+
+ pkt->ol_flags = 0;
+ pkt->port = port_id;
+ pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
+ pkt->nb_segs = nb_segs;
+ pkt->pkt_len = cqe_rx_w1.pkt_len;
+ pkt->data_len = rb_sz[nicvf_frag_num(0)];
+ rte_mbuf_refcnt_set(pkt, 1);
+ pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+ nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
+
+ *rx_pkt = pkt;
+ prev = pkt;
+ for (seg_idx = 1; seg_idx < nb_segs; seg_idx++) {
+ seg = (struct rte_mbuf *)nicvf_mbuff_phy2virt
+ (rb_ptr[seg_idx], mbuf_phys_off);
+
+ prev->next = seg;
+ seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
+ seg->port = port_id;
+ seg->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_mbuf_refcnt_set(seg, 1);
+
+ prev = seg;
+ }
+ prev->next = NULL;
+ return nb_segs;
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ union cq_entry_t *cq_entry;
+ struct cqe_rx_t *cqe_rx;
+ struct nicvf_rxq *rxq = rx_queue;
+ union cq_entry_t *desc = rxq->desc;
+ const uint64_t cqe_mask = rxq->qlen_mask;
+ uint64_t mbuf_phys_off = rxq->mbuf_phys_off;
+ uint32_t i, to_process, cqe_head, buffers_consumed = 0;
+ int32_t available_space = rxq->available_space;
+ uint16_t nb_segs;
+ const uint8_t port_id = rxq->port_id;
+ const uint8_t rbptr_offset = rxq->rbptr_offset;
+
+ cqe_head = rxq->head & cqe_mask;
+ to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
+
+ for (i = 0; i < to_process; i++) {
+ rte_prefetch_non_temporal(&desc[cqe_head + 2]);
+ cq_entry = &desc[cqe_head];
+ cqe_rx = (struct cqe_rx_t *)cq_entry;
+ nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
+ port_id, rx_pkts + i, rbptr_offset);
+ buffers_consumed += nb_segs;
+ cqe_head = (cqe_head + 1) & cqe_mask;
+ nicvf_prefetch_store_keep(rx_pkts[i]);
+ }
+
+ if (likely(to_process)) {
+ rxq->available_space -= to_process;
+ rxq->head = cqe_head;
+ nicvf_addr_write(rxq->cq_door, to_process);
+ rxq->recv_buffers += buffers_consumed;
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -=
+ nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
+ }
+ }
+
+ return to_process;
+}
+
+uint32_t
+nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct nicvf_rxq *rxq;
+
+ rxq = dev->data->rx_queues[queue_idx];
+ return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
+}
+
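+/* Return all outstanding buffer credit to the RBDR in chunks of at most
+ * NICVF_MAX_RX_FREE_THRESH entries, the bulk size that nicvf_fill_rbdr()
+ * stages on its stack.
+ */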
+uint32_t
+nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+ struct nicvf_rxq *rxq;
+ uint32_t to_process;
+ uint32_t rx_free;
+
+ rxq = dev->data->rx_queues[queue_idx];
+ to_process = rxq->recv_buffers;
+ while (rxq->recv_buffers > 0) {
+ rx_free = RTE_MIN(rxq->recv_buffers, NICVF_MAX_RX_FREE_THRESH);
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rx_free);
+ }
+
+ assert(rxq->recv_buffers == 0);
+ return to_process;
+}
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
new file mode 100644
index 00000000..9dad8a5a
--- /dev/null
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -0,0 +1,101 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __THUNDERX_NICVF_RXTX_H__
+#define __THUNDERX_NICVF_RXTX_H__
+
+#include <rte_byteorder.h>
+#include <rte_ethdev.h>
+
+#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
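+/* nicvf_frag_num() maps a logical fragment index to its position in the
+ * CQE's rb_sz array: on big endian hosts the four 16-bit sizes within each
+ * 64-bit word are laid out in reverse, so indices are mirrored within each
+ * group of four (0,1,2,3 -> 3,2,1,0; e.g. 5 -> 6); on little endian it is
+ * the identity.
+ */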
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+static inline uint16_t __attribute__((const))
+nicvf_frag_num(uint16_t i)
+{
+ return (i & ~3) + 3 - (i & 3);
+}
+
+static inline void __hot
+fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
+{
+	/* Use a local sqe to avoid reading from SQ descriptor memory */
+ union sq_entry_t sqe;
+
+ /* Fill the SQ gather entry */
+ sqe.buff[0] = 0; sqe.buff[1] = 0;
+ sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
+ sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT;
+ sqe.gather.size = pkt->data_len;
+ sqe.gather.addr = rte_mbuf_data_dma_addr(pkt);
+
+ entry->buff[0] = sqe.buff[0];
+ entry->buff[1] = sqe.buff[1];
+}
+
+#else
+
+static inline uint16_t __attribute__((const))
+nicvf_frag_num(uint16_t i)
+{
+ return i;
+}
+
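+/* Pack the gather subdescriptor in place: subdescriptor type in bits 63:60,
+ * load type in bits 59:58, buffer length in the low bits of word 0, and the
+ * buffer DMA address in word 1.
+ */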
+static inline void __hot
+fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
+{
+ entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
+ (uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 |
+ pkt->data_len;
+ entry->buff[1] = rte_mbuf_data_dma_addr(pkt);
+}
+#endif
+
+uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
+uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
+
+uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
+uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
+uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t pkts);
+
+void nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq);
+void nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq);
+
+#endif /* __THUNDERX_NICVF_RXTX_H__ */
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
new file mode 100644
index 00000000..c52545d9
--- /dev/null
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -0,0 +1,124 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _THUNDERX_NICVF_STRUCT_H
+#define _THUNDERX_NICVF_STRUCT_H
+
+#include <stdint.h>
+
+#include <rte_spinlock.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+
+struct nicvf_rbdr {
+ uint64_t rbdr_status;
+ uint64_t rbdr_door;
+ struct rbdr_entry_t *desc;
+ nicvf_phys_addr_t phys;
+ uint32_t buffsz;
+ uint32_t tail;
+ uint32_t next_tail;
+ uint32_t head;
+ uint32_t qlen_mask;
+} __rte_cache_aligned;
+
+struct nicvf_txq {
+ union sq_entry_t *desc;
+ nicvf_phys_addr_t phys;
+ struct rte_mbuf **txbuffs;
+ uint64_t sq_head;
+ uint64_t sq_door;
+ struct rte_mempool *pool;
+ struct nicvf *nic;
+ void (*pool_free)(struct nicvf_txq *sq);
+ uint32_t head;
+ uint32_t tail;
+ int32_t xmit_bufs;
+ uint32_t qlen_mask;
+ uint32_t txq_flags;
+ uint16_t queue_id;
+ uint16_t tx_free_thresh;
+} __rte_cache_aligned;
+
+struct nicvf_rxq {
+ uint64_t mbuf_phys_off;
+ uint64_t cq_status;
+ uint64_t cq_door;
+ nicvf_phys_addr_t phys;
+ union cq_entry_t *desc;
+ struct nicvf_rbdr *shared_rbdr;
+ struct nicvf *nic;
+ struct rte_mempool *pool;
+ uint32_t head;
+ uint32_t qlen_mask;
+ int32_t available_space;
+ int32_t recv_buffers;
+ uint16_t rx_free_thresh;
+ uint16_t queue_id;
+ uint16_t precharge_cnt;
+ uint8_t rx_drop_en;
+ uint8_t port_id;
+ uint8_t rbptr_offset;
+} __rte_cache_aligned;
+
+struct nicvf {
+ uint8_t vf_id;
+ uint8_t node;
+ uintptr_t reg_base;
+ bool tns_mode;
+ bool sqs_mode;
+ bool loopback_supported;
+ bool pf_acked:1;
+ bool pf_nacked:1;
+ uint64_t hwcap;
+ uint8_t link_up;
+ uint8_t duplex;
+ uint32_t speed;
+ uint32_t msg_enable;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+ struct nicvf_rbdr *rbdr;
+ struct nicvf_rss_reta_info rss_info;
+ struct rte_eth_dev *eth_dev;
+ struct rte_intr_handle intr_handle;
+ uint8_t cpi_alg;
+ uint16_t mtu;
+ bool vlan_filter_en;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+} __rte_cache_aligned;
+
+#endif /* _THUNDERX_NICVF_STRUCT_H */
diff --git a/drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map b/drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map
new file mode 100644
index 00000000..1901bcb3
--- /dev/null
+++ b/drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map
@@ -0,0 +1,4 @@
+DPDK_16.07 {
+
+ local: *;
+};
diff --git a/drivers/net/vhost/Makefile b/drivers/net/vhost/Makefile
index f49a69b3..050c5aa5 100644
--- a/drivers/net/vhost/Makefile
+++ b/drivers/net/vhost/Makefile
@@ -36,6 +36,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_vhost.a
+LDLIBS += -lpthread
+
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
@@ -54,7 +56,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += rte_eth_vhost.c
SYMLINK-y-include += rte_eth_vhost.h
# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_kvargs
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_vhost
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 310cbefc..3b509465 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -50,12 +50,14 @@
#define ETH_VHOST_IFACE_ARG "iface"
#define ETH_VHOST_QUEUES_ARG "queues"
+#define ETH_VHOST_CLIENT_ARG "client"
static const char *drivername = "VHOST PMD";
static const char *valid_arguments[] = {
ETH_VHOST_IFACE_ARG,
ETH_VHOST_QUEUES_ARG,
+ ETH_VHOST_CLIENT_ARG,
NULL
};
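+/* A devargs string selecting the new client mode could look like this
+ * (illustrative): --vdev 'eth_vhost0,iface=/tmp/sock0,queues=2,client=1'
+ */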
@@ -71,9 +73,9 @@ static struct ether_addr base_eth_addr = {
};
struct vhost_queue {
+ int vid;
rte_atomic32_t allow_queuing;
rte_atomic32_t while_queuing;
- struct virtio_net *device;
struct pmd_internal *internal;
struct rte_mempool *mb_pool;
uint8_t port;
@@ -89,6 +91,7 @@ struct pmd_internal {
char *dev_name;
char *iface_name;
uint16_t max_queues;
+ uint64_t flags;
volatile uint16_t once;
};
@@ -139,7 +142,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
goto out;
/* Dequeue packets from guest TX queue */
- nb_rx = rte_vhost_dequeue_burst(r->device,
+ nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
r->rx_pkts += nb_rx;
@@ -170,7 +173,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
goto out;
/* Enqueue packets to guest RX queue */
- nb_tx = rte_vhost_enqueue_burst(r->device,
+ nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
r->tx_pkts += nb_tx;
@@ -222,25 +225,22 @@ find_internal_resource(char *ifname)
}
static int
-new_device(struct virtio_net *dev)
+new_device(int vid)
{
struct rte_eth_dev *eth_dev;
struct internal_list *list;
struct pmd_internal *internal;
struct vhost_queue *vq;
unsigned i;
+ char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
- int newnode, ret;
+ int newnode;
#endif
- if (dev == NULL) {
- RTE_LOG(INFO, PMD, "Invalid argument\n");
- return -1;
- }
-
- list = find_internal_resource(dev->ifname);
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+ list = find_internal_resource(ifname);
if (list == NULL) {
- RTE_LOG(INFO, PMD, "Invalid device name\n");
+ RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
return -1;
}
@@ -248,21 +248,16 @@ new_device(struct virtio_net *dev)
internal = eth_dev->data->dev_private;
#ifdef RTE_LIBRTE_VHOST_NUMA
- ret = get_mempolicy(&newnode, NULL, 0, dev,
- MPOL_F_NODE | MPOL_F_ADDR);
- if (ret < 0) {
- RTE_LOG(ERR, PMD, "Unknown numa node\n");
- return -1;
- }
-
- eth_dev->data->numa_node = newnode;
+ newnode = rte_vhost_get_numa_node(vid);
+ if (newnode >= 0)
+ eth_dev->data->numa_node = newnode;
#endif
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
vq = eth_dev->data->rx_queues[i];
if (vq == NULL)
continue;
- vq->device = dev;
+ vq->vid = vid;
vq->internal = internal;
vq->port = eth_dev->data->port_id;
}
@@ -270,16 +265,14 @@ new_device(struct virtio_net *dev)
vq = eth_dev->data->tx_queues[i];
if (vq == NULL)
continue;
- vq->device = dev;
+ vq->vid = vid;
vq->internal = internal;
vq->port = eth_dev->data->port_id;
}
- for (i = 0; i < dev->virt_qp_nb * VIRTIO_QNUM; i++)
- rte_vhost_enable_guest_notification(dev, i, 0);
+ for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
+ rte_vhost_enable_guest_notification(vid, i, 0);
- dev->flags |= VIRTIO_DEV_RUNNING;
- dev->priv = eth_dev;
eth_dev->data->dev_link.link_status = ETH_LINK_UP;
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -303,22 +296,21 @@ new_device(struct virtio_net *dev)
}
static void
-destroy_device(volatile struct virtio_net *dev)
+destroy_device(int vid)
{
struct rte_eth_dev *eth_dev;
struct vhost_queue *vq;
+ struct internal_list *list;
+ char ifname[PATH_MAX];
unsigned i;
- if (dev == NULL) {
- RTE_LOG(INFO, PMD, "Invalid argument\n");
- return;
- }
-
- eth_dev = (struct rte_eth_dev *)dev->priv;
- if (eth_dev == NULL) {
- RTE_LOG(INFO, PMD, "Failed to find a ethdev\n");
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+ list = find_internal_resource(ifname);
+ if (list == NULL) {
+ RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
return;
}
+ eth_dev = list->eth_dev;
/* Wait until rx/tx_pkt_burst stops accessing vhost device */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
@@ -340,20 +332,17 @@ destroy_device(volatile struct virtio_net *dev)
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
- dev->priv = NULL;
- dev->flags &= ~VIRTIO_DEV_RUNNING;
-
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
vq = eth_dev->data->rx_queues[i];
if (vq == NULL)
continue;
- vq->device = NULL;
+ vq->vid = -1;
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
vq = eth_dev->data->tx_queues[i];
if (vq == NULL)
continue;
- vq->device = NULL;
+ vq->vid = -1;
}
RTE_LOG(INFO, PMD, "Connection closed\n");
@@ -362,20 +351,17 @@ destroy_device(volatile struct virtio_net *dev)
}
static int
-vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable)
+vring_state_changed(int vid, uint16_t vring, int enable)
{
struct rte_vhost_vring_state *state;
struct rte_eth_dev *eth_dev;
struct internal_list *list;
+ char ifname[PATH_MAX];
- if (dev == NULL) {
- RTE_LOG(ERR, PMD, "Invalid argument\n");
- return -1;
- }
-
- list = find_internal_resource(dev->ifname);
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
+ list = find_internal_resource(ifname);
if (list == NULL) {
- RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", dev->ifname);
+ RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
return -1;
}
@@ -484,7 +470,8 @@ eth_dev_start(struct rte_eth_dev *dev)
int ret = 0;
if (rte_atomic16_cmpset(&internal->once, 0, 1)) {
- ret = rte_vhost_driver_register(internal->iface_name);
+ ret = rte_vhost_driver_register(internal->iface_name,
+ internal->flags);
if (ret)
return ret;
}
@@ -607,7 +594,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->ipackets = rx_total;
stats->opackets = tx_total;
- stats->imissed = tx_missed_total;
+ stats->oerrors = tx_missed_total;
stats->ibytes = rx_total_bytes;
stats->obytes = tx_total_bytes;
}
@@ -689,7 +676,7 @@ static const struct eth_dev_ops ops = {
static int
eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
- const unsigned numa_node)
+ const unsigned numa_node, uint64_t flags)
{
struct rte_eth_dev_data *data = NULL;
struct pmd_internal *internal = NULL;
@@ -746,6 +733,7 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
internal->iface_name = strdup(iface_name);
if (internal->iface_name == NULL)
goto error;
+ internal->flags = flags;
list->eth_dev = eth_dev;
pthread_mutex_lock(&internal_list_lock);
@@ -810,18 +798,15 @@ open_iface(const char *key __rte_unused, const char *value, void *extra_args)
}
static inline int
-open_queues(const char *key __rte_unused, const char *value, void *extra_args)
+open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
- uint16_t *q = extra_args;
+ uint16_t *n = extra_args;
if (value == NULL || extra_args == NULL)
return -EINVAL;
- *q = (uint16_t)strtoul(value, NULL, 0);
- if (*q == USHRT_MAX && errno == ERANGE)
- return -1;
-
- if (*q > RTE_MAX_QUEUES_PER_PORT)
+ *n = (uint16_t)strtoul(value, NULL, 0);
+ if (*n == USHRT_MAX && errno == ERANGE)
return -1;
return 0;
@@ -834,6 +819,8 @@ rte_pmd_vhost_devinit(const char *name, const char *params)
int ret = 0;
char *iface_name;
uint16_t queues;
+ uint64_t flags = 0;
+ int client_mode = 0;
RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);
@@ -853,14 +840,24 @@ rte_pmd_vhost_devinit(const char *name, const char *params)
if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
- &open_queues, &queues);
- if (ret < 0)
+ &open_int, &queues);
+ if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
goto out_free;
} else
queues = 1;
- eth_dev_vhost_create(name, iface_name, queues, rte_socket_id());
+ if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
+ &open_int, &client_mode);
+ if (ret < 0)
+ goto out_free;
+
+ if (client_mode)
+ flags |= RTE_VHOST_USER_CLIENT;
+ }
+
+ eth_dev_vhost_create(name, iface_name, queues, rte_socket_id(), flags);
out_free:
rte_kvargs_free(kvlist);
diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile
index ef84f604..3020b688 100644
--- a/drivers/net/virtio/Makefile
+++ b/drivers/net/virtio/Makefile
@@ -55,9 +55,16 @@ ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSSE3,$(CFLAGS)),RTE_MACHINE_CPUFLAG_SSSE
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple.c
endif
+ifeq ($(CONFIG_RTE_VIRTIO_USER),y)
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_user.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/virtio_user_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user_ethdev.c
+endif
+
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_net
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 63a368ac..480daa37 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,8 +59,6 @@
#include "virtqueue.h"
#include "virtio_rxtx.h"
-
-static int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
@@ -80,7 +78,10 @@ static void virtio_get_hwaddr(struct virtio_hw *hw);
static void virtio_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
+static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
@@ -115,40 +116,61 @@ struct rte_virtio_xstats_name_off {
};
/* [rt]x_qX_ is prepended to the name string here */
-static const struct rte_virtio_xstats_name_off rte_virtio_q_stat_strings[] = {
- {"good_packets", offsetof(struct virtqueue, packets)},
- {"good_bytes", offsetof(struct virtqueue, bytes)},
- {"errors", offsetof(struct virtqueue, errors)},
- {"multicast_packets", offsetof(struct virtqueue, multicast)},
- {"broadcast_packets", offsetof(struct virtqueue, broadcast)},
- {"undersize_packets", offsetof(struct virtqueue, size_bins[0])},
- {"size_64_packets", offsetof(struct virtqueue, size_bins[1])},
- {"size_65_127_packets", offsetof(struct virtqueue, size_bins[2])},
- {"size_128_255_packets", offsetof(struct virtqueue, size_bins[3])},
- {"size_256_511_packets", offsetof(struct virtqueue, size_bins[4])},
- {"size_512_1023_packets", offsetof(struct virtqueue, size_bins[5])},
- {"size_1024_1517_packets", offsetof(struct virtqueue, size_bins[6])},
- {"size_1518_max_packets", offsetof(struct virtqueue, size_bins[7])},
+static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
+ {"good_packets", offsetof(struct virtnet_rx, stats.packets)},
+ {"good_bytes", offsetof(struct virtnet_rx, stats.bytes)},
+ {"errors", offsetof(struct virtnet_rx, stats.errors)},
+ {"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)},
+ {"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)},
+ {"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])},
+ {"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])},
+ {"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])},
+ {"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])},
+ {"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])},
+ {"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])},
+ {"size_1024_1517_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
+ {"size_1518_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])},
+};
+
+/* [rt]x_qX_ is prepended to the name string here */
+static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
+ {"good_packets", offsetof(struct virtnet_tx, stats.packets)},
+ {"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
+ {"errors", offsetof(struct virtnet_tx, stats.errors)},
+ {"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
+ {"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
+ {"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
+ {"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])},
+ {"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])},
+ {"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])},
+ {"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])},
+ {"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])},
+ {"size_1024_1517_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
+ {"size_1518_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])},
};
-#define VIRTIO_NB_Q_XSTATS (sizeof(rte_virtio_q_stat_strings) / \
- sizeof(rte_virtio_q_stat_strings[0]))
+#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
+ sizeof(rte_virtio_rxq_stat_strings[0]))
+#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
+ sizeof(rte_virtio_txq_stat_strings[0]))
static int
-virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
{
uint32_t head, i;
int k, sum = 0;
virtio_net_ctrl_ack status = ~0;
struct virtio_pmd_ctrl result;
+ struct virtqueue *vq;
ctrl->status = status;
- if (!(vq && vq->hw->cvq)) {
+	if (!cvq || !cvq->vq) {
PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
+ vq = cvq->vq;
head = vq->vq_desc_head_idx;
PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
@@ -158,7 +180,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
return -1;
- memcpy(vq->virtio_net_hdr_mz->addr, ctrl,
+ memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));
/*
@@ -168,14 +190,14 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
* One RX packet for ACK.
*/
vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
- vq->vq_ring.desc[head].addr = vq->virtio_net_hdr_mz->phys_addr;
+ vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
i = vq->vq_ring.desc[head].next;
for (k = 0; k < pkt_num; k++) {
vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr
+ vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
vq->vq_ring.desc[i].len = dlen[k];
@@ -185,7 +207,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
}
vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
- vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr
+ vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
vq->vq_ring.desc[i].len = sizeof(ctrl->status);
vq->vq_free_cnt--;
@@ -200,12 +222,12 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
virtqueue_notify(vq);
rte_rmb();
- while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ while (VIRTQUEUE_NUSED(vq) == 0) {
rte_rmb();
usleep(100);
}
- while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ while (VIRTQUEUE_NUSED(vq)) {
uint32_t idx, desc_idx, used_idx;
struct vring_used_elem *uep;
@@ -230,7 +252,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
vq->vq_free_cnt, vq->vq_desc_head_idx);
- memcpy(&result, vq->virtio_net_hdr_mz->addr,
+ memcpy(&result, cvq->virtio_net_hdr_mz->addr,
sizeof(struct virtio_pmd_ctrl));
return result.status;
@@ -261,12 +283,14 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
}
void
-virtio_dev_queue_release(struct virtqueue *vq) {
+virtio_dev_queue_release(struct virtqueue *vq)
+{
struct virtio_hw *hw;
if (vq) {
hw = vq->hw;
- hw->vtpci_ops->del_queue(hw, vq);
+ if (vq->configured)
+ hw->vtpci_ops->del_queue(hw, vq);
rte_free(vq->sw_ring);
rte_free(vq);
@@ -279,13 +303,21 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
uint16_t vtpci_queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- struct virtqueue **pvq)
+ void **pvq)
{
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
- const struct rte_memzone *mz;
+ char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
+ const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
unsigned int vq_size, size;
struct virtio_hw *hw = dev->data->dev_private;
- struct virtqueue *vq = NULL;
+ struct virtnet_rx *rxvq = NULL;
+ struct virtnet_tx *txvq = NULL;
+ struct virtnet_ctl *cvq = NULL;
+ struct virtqueue *vq;
+ const char *queue_names[] = {"rvq", "txq", "cvq"};
+ size_t sz_vq, sz_q = 0, sz_hdr_mz = 0;
+ void *sw_ring = NULL;
+ int ret;
PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
@@ -305,39 +337,33 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
+ snprintf(vq_name, sizeof(vq_name), "port%d_%s%d",
+ dev->data->port_id, queue_names[queue_type], queue_idx);
+
+ sz_vq = RTE_ALIGN_CEIL(sizeof(*vq) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
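+	/* sz_vq is also the offset at which the per-type struct (virtnet_rx,
+	 * virtnet_tx or virtnet_ctl) is placed within the same allocation;
+	 * see the RTE_PTR_ADD(vq, sz_vq) uses below.
+	 */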
if (queue_type == VTNET_RQ) {
- snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
- dev->data->port_id, queue_idx);
- vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
- vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
- vq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
- (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
- sizeof(vq->sw_ring[0]), RTE_CACHE_LINE_SIZE, socket_id);
+ sz_q = sz_vq + sizeof(*rxvq);
} else if (queue_type == VTNET_TQ) {
- snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
- dev->data->port_id, queue_idx);
- vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
- vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
+ sz_q = sz_vq + sizeof(*txvq);
+ /*
+ * For each xmit packet, allocate a virtio_net_hdr
+ * and indirect ring elements
+ */
+ sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
} else if (queue_type == VTNET_CQ) {
- snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
- dev->data->port_id);
- vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
- vq_size * sizeof(struct vq_desc_extra),
- RTE_CACHE_LINE_SIZE);
+ sz_q = sz_vq + sizeof(*cvq);
+ /* Allocate a page for control vq command, data and status */
+ sz_hdr_mz = PAGE_SIZE;
}
+
+ vq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id);
if (vq == NULL) {
- PMD_INIT_LOG(ERR, "Can not allocate virtqueue");
+ PMD_INIT_LOG(ERR, "can not allocate vq");
return -ENOMEM;
}
- if (queue_type == VTNET_RQ && vq->sw_ring == NULL) {
- PMD_INIT_LOG(ERR, "Can not allocate RX soft ring");
- rte_free(vq);
- return -ENOMEM;
- }
-
vq->hw = hw;
- vq->port_id = dev->data->port_id;
- vq->queue_id = queue_idx;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
@@ -350,64 +376,103 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
*/
size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
- PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
+ PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
+ size, vq->vq_ring_size);
- mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
- socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id,
+ 0, VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
mz = rte_memzone_lookup(vq_name);
if (mz == NULL) {
- rte_free(vq);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_q_alloc;
}
}
- /*
- * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
- * and only accepts 32 bit page frame number.
- * Check if the allocated physical memory exceeds 16TB.
- */
- if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
- PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
- rte_free(vq);
- return -ENOMEM;
- }
-
memset(mz->addr, 0, sizeof(mz->len));
- vq->mz = mz;
+
vq->vq_ring_mem = mz->phys_addr;
vq->vq_ring_virt_mem = mz->addr;
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64, (uint64_t)mz->phys_addr);
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, (uint64_t)(uintptr_t)mz->addr);
- vq->virtio_net_hdr_mz = NULL;
- vq->virtio_net_hdr_mem = 0;
-
- if (queue_type == VTNET_TQ) {
- const struct rte_memzone *hdr_mz;
- struct virtio_tx_region *txr;
- unsigned int i;
-
- /*
- * For each xmit packet, allocate a virtio_net_hdr
- * and indirect ring elements
- */
- snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
- dev->data->port_id, queue_idx);
- hdr_mz = rte_memzone_reserve_aligned(vq_name,
- vq_size * sizeof(*txr),
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
+ (uint64_t)mz->phys_addr);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
+ (uint64_t)(uintptr_t)mz->addr);
+
+ if (sz_hdr_mz) {
+ snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_%s%d_hdr",
+ dev->data->port_id, queue_names[queue_type],
+ queue_idx);
+ hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
socket_id, 0,
RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
- hdr_mz = rte_memzone_lookup(vq_name);
+ hdr_mz = rte_memzone_lookup(vq_hdr_name);
if (hdr_mz == NULL) {
- rte_free(vq);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_q_alloc;
}
}
- vq->virtio_net_hdr_mz = hdr_mz;
- vq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ }
+
+ if (queue_type == VTNET_RQ) {
+ size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
+ sizeof(vq->sw_ring[0]);
+
+ sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!sw_ring) {
+ PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
+ ret = -ENOMEM;
+ goto fail_q_alloc;
+ }
+
+ vq->sw_ring = sw_ring;
+ rxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq);
+ rxvq->vq = vq;
+ rxvq->port_id = dev->data->port_id;
+ rxvq->queue_id = queue_idx;
+ rxvq->mz = mz;
+ *pvq = rxvq;
+ } else if (queue_type == VTNET_TQ) {
+ txvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, sz_vq);
+ txvq->vq = vq;
+ txvq->port_id = dev->data->port_id;
+ txvq->queue_id = queue_idx;
+ txvq->mz = mz;
+ txvq->virtio_net_hdr_mz = hdr_mz;
+ txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+
+ *pvq = txvq;
+ } else if (queue_type == VTNET_CQ) {
+ cvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq);
+ cvq->vq = vq;
+ cvq->mz = mz;
+ cvq->virtio_net_hdr_mz = hdr_mz;
+ cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
+ *pvq = cvq;
+ }
+
+	/* For the virtio-user case (that is, when dev->pci_dev is NULL), we
+	 * use the virtual address, and _offset_ needs to be set accordingly;
+	 * see MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
+ */
+ if (dev->pci_dev)
+ vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
+ else {
+ vq->vq_ring_mem = (uintptr_t)mz->addr;
+ vq->offset = offsetof(struct rte_mbuf, buf_addr);
+ if (queue_type == VTNET_TQ)
+ txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ else if (queue_type == VTNET_CQ)
+ cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ }
+
+ if (queue_type == VTNET_TQ) {
+ struct virtio_tx_region *txr;
+ unsigned int i;
txr = hdr_mz->addr;
memset(txr, 0, vq_size * sizeof(*txr));
@@ -417,57 +482,50 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
/* first indirect descriptor is always the tx header */
- start_dp->addr = vq->virtio_net_hdr_mem
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ i * sizeof(*txr)
+ offsetof(struct virtio_tx_region, tx_hdr);
- start_dp->len = vq->hw->vtnet_hdr_size;
+ start_dp->len = hw->vtnet_hdr_size;
start_dp->flags = VRING_DESC_F_NEXT;
}
-
- } else if (queue_type == VTNET_CQ) {
- /* Allocate a page for control vq command, data and status */
- snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
- dev->data->port_id);
- vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
- PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE);
- if (vq->virtio_net_hdr_mz == NULL) {
- if (rte_errno == EEXIST)
- vq->virtio_net_hdr_mz =
- rte_memzone_lookup(vq_name);
- if (vq->virtio_net_hdr_mz == NULL) {
- rte_free(vq);
- return -ENOMEM;
- }
- }
- vq->virtio_net_hdr_mem =
- vq->virtio_net_hdr_mz->phys_addr;
- memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
}
- hw->vtpci_ops->setup_queue(hw, vq);
+ if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
+ PMD_INIT_LOG(ERR, "setup_queue failed");
+ virtio_dev_queue_release(vq);
+ return -EINVAL;
+ }
- *pvq = vq;
+ vq->configured = 1;
return 0;
+
+fail_q_alloc:
+ rte_free(sw_ring);
+ rte_memzone_free(hdr_mz);
+ rte_memzone_free(mz);
+ rte_free(vq);
+
+ return ret;
}
static int
virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
uint32_t socket_id)
{
- struct virtqueue *vq;
+ struct virtnet_ctl *cvq;
int ret;
struct virtio_hw *hw = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
- vtpci_queue_idx, 0, socket_id, &vq);
+ vtpci_queue_idx, 0, socket_id, (void **)&cvq);
if (ret < 0) {
PMD_INIT_LOG(ERR, "control vq initialization failed");
return ret;
}
- hw->cvq = vq;
+ hw->cvq = cvq;
return 0;
}
@@ -491,7 +549,6 @@ static void
virtio_dev_close(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
- struct rte_pci_device *pci_dev = dev->pci_dev;
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
@@ -499,7 +556,7 @@ virtio_dev_close(struct rte_eth_dev *dev)
virtio_dev_stop(dev);
/* reset the NIC */
- if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
vtpci_reset(hw);
virtio_dev_free_mbufs(dev);
@@ -614,6 +671,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.dev_infos_get = virtio_dev_info_get,
.stats_get = virtio_dev_stats_get,
.xstats_get = virtio_dev_xstats_get,
+ .xstats_get_names = virtio_dev_xstats_get_names,
.stats_reset = virtio_dev_stats_reset,
.xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
@@ -675,83 +733,121 @@ virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
unsigned i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- const struct virtqueue *txvq = dev->data->tx_queues[i];
+ const struct virtnet_tx *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;
- stats->opackets += txvq->packets;
- stats->obytes += txvq->bytes;
- stats->oerrors += txvq->errors;
+ stats->opackets += txvq->stats.packets;
+ stats->obytes += txvq->stats.bytes;
+ stats->oerrors += txvq->stats.errors;
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
- stats->q_opackets[i] = txvq->packets;
- stats->q_obytes[i] = txvq->bytes;
+ stats->q_opackets[i] = txvq->stats.packets;
+ stats->q_obytes[i] = txvq->stats.bytes;
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- const struct virtqueue *rxvq = dev->data->rx_queues[i];
+ const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;
- stats->ipackets += rxvq->packets;
- stats->ibytes += rxvq->bytes;
- stats->ierrors += rxvq->errors;
+ stats->ipackets += rxvq->stats.packets;
+ stats->ibytes += rxvq->stats.bytes;
+ stats->ierrors += rxvq->stats.errors;
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
- stats->q_ipackets[i] = rxvq->packets;
- stats->q_ibytes[i] = rxvq->bytes;
+ stats->q_ipackets[i] = rxvq->stats.packets;
+ stats->q_ibytes[i] = rxvq->stats.bytes;
}
}
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}
+static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned limit)
+{
+ unsigned i;
+ unsigned count = 0;
+ unsigned t;
+
+ unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
+ dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
+
+ if (xstats_names != NULL) {
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+ if (rxvq == NULL)
+ continue;
+ for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_q%u_%s", i,
+ rte_virtio_rxq_stat_strings[t].name);
+ count++;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			struct virtnet_tx *txvq = dev->data->tx_queues[i];
+ if (txvq == NULL)
+ continue;
+ for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_q%u_%s", i,
+ rte_virtio_txq_stat_strings[t].name);
+ count++;
+ }
+ }
+ return count;
+ }
+ return nstats;
+}
+
static int
-virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
unsigned i;
unsigned count = 0;
- unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS +
- dev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS;
+ unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
+ dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
if (n < nstats)
return nstats;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct virtqueue *rxvq = dev->data->rx_queues[i];
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;
unsigned t;
- for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_q%u_%s", i,
- rte_virtio_q_stat_strings[t].name);
+ for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
xstats[count].value = *(uint64_t *)(((char *)rxvq) +
- rte_virtio_q_stat_strings[t].offset);
+ rte_virtio_rxq_stat_strings[t].offset);
count++;
}
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct virtqueue *txvq = dev->data->tx_queues[i];
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;
unsigned t;
- for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_q%u_%s", i,
- rte_virtio_q_stat_strings[t].name);
+ for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
xstats[count].value = *(uint64_t *)(((char *)txvq) +
- rte_virtio_q_stat_strings[t].offset);
+ rte_virtio_txq_stat_strings[t].offset);
count++;
}
}
@@ -771,29 +867,31 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev)
unsigned int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct virtqueue *txvq = dev->data->tx_queues[i];
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;
- txvq->packets = 0;
- txvq->bytes = 0;
- txvq->errors = 0;
- txvq->multicast = 0;
- txvq->broadcast = 0;
- memset(txvq->size_bins, 0, sizeof(txvq->size_bins[0]) * 8);
+ txvq->stats.packets = 0;
+ txvq->stats.bytes = 0;
+ txvq->stats.errors = 0;
+ txvq->stats.multicast = 0;
+ txvq->stats.broadcast = 0;
+ memset(txvq->stats.size_bins, 0,
+ sizeof(txvq->stats.size_bins[0]) * 8);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct virtqueue *rxvq = dev->data->rx_queues[i];
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;
- rxvq->packets = 0;
- rxvq->bytes = 0;
- rxvq->errors = 0;
- rxvq->multicast = 0;
- rxvq->broadcast = 0;
- memset(rxvq->size_bins, 0, sizeof(rxvq->size_bins[0]) * 8);
+ rxvq->stats.packets = 0;
+ rxvq->stats.bytes = 0;
+ rxvq->stats.errors = 0;
+ rxvq->stats.multicast = 0;
+ rxvq->stats.broadcast = 0;
+ memset(rxvq->stats.size_bins, 0,
+ sizeof(rxvq->stats.size_bins[0]) * 8);
}
}
@@ -827,7 +925,7 @@ virtio_mac_table_set(struct virtio_hw *hw,
int err, len[2];
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
- PMD_DRV_LOG(INFO, "host does not support mac table\n");
+ PMD_DRV_LOG(INFO, "host does not support mac table");
return;
}
@@ -1027,16 +1125,17 @@ rx_func_get(struct rte_eth_dev *eth_dev)
* This function is based on probe() function in virtio_pci.c
* It returns 0 on success.
*/
-static int
+int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
struct virtio_net_config *config;
struct virtio_net_config local_config;
struct rte_pci_device *pci_dev;
+ uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
int ret;
- RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr));
+ RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
eth_dev->dev_ops = &virtio_eth_dev_ops;
eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
@@ -1057,9 +1156,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
pci_dev = eth_dev->pci_dev;
- ret = vtpci_init(pci_dev, hw);
- if (ret)
- return ret;
+ if (pci_dev) {
+ ret = vtpci_init(pci_dev, hw, &dev_flags);
+ if (ret)
+ return ret;
+ }
/* Reset the device although not necessary at startup */
vtpci_reset(hw);
@@ -1074,9 +1175,10 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
/* If host does not support status then disable LSC */
if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
- pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;
+ dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags = dev_flags;
rx_func_get(eth_dev);
@@ -1150,12 +1252,13 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d hw->max_tx_queues=%d",
hw->max_rx_queues, hw->max_tx_queues);
- PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ if (pci_dev)
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
/* Setup interrupt callback */
- if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
rte_intr_callback_register(&pci_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
@@ -1184,13 +1287,14 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = NULL;
eth_dev->rx_pkt_burst = NULL;
- virtio_dev_queue_release(hw->cvq);
+ if (hw->cvq)
+ virtio_dev_queue_release(hw->cvq->vq);
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
/* reset interrupt callback */
- if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
rte_intr_callback_unregister(&pci_dev->intr_handle,
virtio_interrupt_handler,
eth_dev);
@@ -1240,7 +1344,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
- struct rte_pci_device *pci_dev = dev->pci_dev;
PMD_INIT_LOG(DEBUG, "configure");
@@ -1258,7 +1361,7 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return -ENOTSUP;
}
- if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
PMD_DRV_LOG(ERR, "failed to set config vector");
return -EBUSY;
@@ -1273,11 +1376,12 @@ virtio_dev_start(struct rte_eth_dev *dev)
{
uint16_t nb_queues, i;
struct virtio_hw *hw = dev->data->dev_private;
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct virtnet_rx *rxvq;
+ struct virtnet_tx *txvq __rte_unused;
/* check if lsc interrupt feature is enabled */
if (dev->data->dev_conf.intr_conf.lsc) {
- if (!(pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
+ if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
PMD_DRV_LOG(ERR, "link status not supported by host");
return -ENOTSUP;
}
@@ -1313,16 +1417,22 @@ virtio_dev_start(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
- for (i = 0; i < nb_queues; i++)
- virtqueue_notify(dev->data->rx_queues[i]);
+ for (i = 0; i < nb_queues; i++) {
+ rxvq = dev->data->rx_queues[i];
+ virtqueue_notify(rxvq->vq);
+ }
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxvq = dev->data->rx_queues[i];
+ VIRTQUEUE_DUMP(rxvq->vq);
+ }
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ VIRTQUEUE_DUMP(txvq->vq);
+ }
return 0;
}
@@ -1333,14 +1443,14 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
int i, mbuf_num = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+
PMD_INIT_LOG(DEBUG,
"Before freeing rxq[%d] used and unused buf", i);
- VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+ VIRTQUEUE_DUMP(rxvq->vq);
- PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p",
- i, dev->data->rx_queues[i]);
- while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
- dev->data->rx_queues[i])) != NULL) {
+ PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
+ while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
rte_pktmbuf_free(buf);
mbuf_num++;
}
@@ -1348,27 +1458,27 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
PMD_INIT_LOG(DEBUG,
"After freeing rxq[%d] used and unused buf", i);
- VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+ VIRTQUEUE_DUMP(rxvq->vq);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
+
PMD_INIT_LOG(DEBUG,
"Before freeing txq[%d] used and unused bufs",
i);
- VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+ VIRTQUEUE_DUMP(txvq->vq);
mbuf_num = 0;
- while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
- dev->data->tx_queues[i])) != NULL) {
+ while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
rte_pktmbuf_free(buf);
-
mbuf_num++;
}
PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
PMD_INIT_LOG(DEBUG,
"After freeing txq[%d] used and unused buf", i);
- VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+ VIRTQUEUE_DUMP(txvq->vq);
}
}
@@ -1431,7 +1541,10 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct virtio_hw *hw = dev->data->dev_private;
- dev_info->driver_name = dev->driver->pci_drv.name;
+ if (dev->pci_dev)
+ dev_info->driver_name = dev->driver->pci_drv.name;
+ else
+ dev_info->driver_name = "virtio-user PMD";
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 66423a07..2ecec6eb 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -81,7 +81,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
uint16_t vtpci_queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- struct virtqueue **pvq);
+ void **pvq);
void virtio_dev_queue_release(struct virtqueue *vq);
@@ -113,6 +113,8 @@ uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
+
/*
* The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
* frames larger than 1514 bytes. We do not yet support software LRO
diff --git a/drivers/net/virtio/virtio_logs.h b/drivers/net/virtio/virtio_logs.h
index d6c33f7b..90a79eaa 100644
--- a/drivers/net/virtio/virtio_logs.h
+++ b/drivers/net/virtio/virtio_logs.h
@@ -47,14 +47,14 @@
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s() rx: " fmt , __func__, ## args)
+ RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args)
#else
#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
#endif
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s() tx: " fmt , __func__, ## args)
+ RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args)
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
#endif
@@ -62,7 +62,7 @@
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER
#define PMD_DRV_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt , __func__, ## args)
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
#endif
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index c007959f..f1a7ca7e 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -55,20 +55,103 @@
*/
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)
+static inline int
+check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+ /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
+ * and only accepts 32 bit page frame number.
+ * Check if the allocated physical memory exceeds 16TB.
+ */
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Since we are in legacy mode:
+ * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
+ *
+ * "Note that this is possible because while the virtio header is PCI (i.e.
+ * little) endian, the device-specific region is encoded in the native endian of
+ * the guest (where such distinction is applicable)."
+ *
+ * For powerpc, which supports both endiannesses, QEMU assumes the CPU is
+ * big endian and enforces this for the virtio-net device.
+ */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
void *dst, int length)
{
+#ifdef RTE_ARCH_PPC_64
+ int size;
+
+ while (length > 0) {
+ if (length >= 4) {
+ size = 4;
+ rte_eal_pci_ioport_read(&hw->io, dst, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ *(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
+ } else if (length >= 2) {
+ size = 2;
+ rte_eal_pci_ioport_read(&hw->io, dst, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ *(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
+ } else {
+ size = 1;
+ rte_eal_pci_ioport_read(&hw->io, dst, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ }
+
+ dst = (char *)dst + size;
+ offset += size;
+ length -= size;
+ }
+#else
rte_eal_pci_ioport_read(&hw->io, dst, length,
VIRTIO_PCI_CONFIG(hw) + offset);
+#endif
}
static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
const void *src, int length)
{
+#ifdef RTE_ARCH_PPC_64
+ union {
+ uint32_t u32;
+ uint16_t u16;
+ } tmp;
+ int size;
+
+ while (length > 0) {
+ if (length >= 4) {
+ size = 4;
+ tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
+ rte_eal_pci_ioport_write(&hw->io, &tmp.u32, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ } else if (length >= 2) {
+ size = 2;
+ tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
+ rte_eal_pci_ioport_write(&hw->io, &tmp.u16, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ } else {
+ size = 1;
+ rte_eal_pci_ioport_write(&hw->io, src, size,
+ VIRTIO_PCI_CONFIG(hw) + offset);
+ }
+
+ src = (const char *)src + size;
+ offset += size;
+ length -= size;
+ }
+#else
rte_eal_pci_ioport_write(&hw->io, src, length,
VIRTIO_PCI_CONFIG(hw) + offset);
+#endif
}
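As a concrete trace of the PPC64 path above (illustrative values only): reading the 6-byte MAC address at device-config offset 0 is split by the loop into two accesses:

    /* iteration 1: length = 6 -> size = 4; bytes 0..3 are read and
     *              byte-swapped with rte_be_to_cpu_32()
     * iteration 2: length = 2 -> size = 2; bytes 4..5 are read and
     *              byte-swapped with rte_be_to_cpu_16()
     * length then reaches 0 and the loop exits.
     */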
static uint64_t
@@ -143,15 +226,20 @@ legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
return dst;
}
-static void
+static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
uint32_t src;
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
- src = vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+ src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
+
+ return 0;
}
static void
@@ -179,7 +267,7 @@ legacy_virtio_has_msix(const struct rte_pci_addr *loc)
char dirname[PATH_MAX];
snprintf(dirname, sizeof(dirname),
- SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/msi_irqs",
+ "%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(),
loc->domain, loc->bus, loc->devid, loc->function);
d = opendir(dirname);
@@ -199,15 +287,15 @@ legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
static int
legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
- struct virtio_hw *hw)
+ struct virtio_hw *hw, uint32_t *dev_flags)
{
if (rte_eal_pci_ioport_map(pci_dev, 0, &hw->io) < 0)
return -1;
if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
- pci_dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC;
+ *dev_flags |= RTE_ETH_DEV_INTR_LSC;
else
- pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;
+ *dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
return 0;
}
@@ -367,13 +455,16 @@ modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
return io_read16(&hw->common_cfg->queue_size);
}
-static void
+static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
uint64_t desc_addr, avail_addr, used_addr;
uint16_t notify_off;
- desc_addr = vq->mz->phys_addr;
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
+ desc_addr = vq->vq_ring_mem;
avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
ring[vq->vq_nentries]),
@@ -400,6 +491,8 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
vq->notify_addr, notify_off);
+
+ return 0;
}
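A worked example of the layout computed above, assuming 256 descriptors and the usual 4096-byte VIRTIO_PCI_VRING_ALIGN:

    /* desc table : 256 * sizeof(struct vring_desc) = 256 * 16 = 4096
     * avail ring : offsetof(struct vring_avail, ring[256]) = 4 + 2*256 = 516
     * used ring  : starts at RTE_ALIGN_CEIL(4096 + 516, 4096) = 8192
     *
     * so used_addr = vq_ring_mem + 8192 in this configuration.
     */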
static void
@@ -626,11 +719,13 @@ next:
* Return -1:
* if there is error mapping with VFIO/UIO.
* if port map error when driver type is KDRV_NONE.
+ * if whitelisted but driver type is KDRV_UNKNOWN.
* Return 1 if kernel driver is managing the device.
* Return 0 on success.
*/
int
-vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
+vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
+ uint32_t *dev_flags)
{
hw->dev = dev;
@@ -643,14 +738,15 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
hw->vtpci_ops = &modern_ops;
hw->modern = 1;
- dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC;
+ *dev_flags |= RTE_ETH_DEV_INTR_LSC;
return 0;
}
PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
- if (legacy_virtio_resource_init(dev, hw) < 0) {
+ if (legacy_virtio_resource_init(dev, hw, dev_flags) < 0) {
if (dev->kdrv == RTE_KDRV_UNKNOWN &&
- dev->devargs->type != RTE_DEVTYPE_WHITELISTED_PCI) {
+ (!dev->devargs ||
+ dev->devargs->type != RTE_DEVTYPE_WHITELISTED_PCI)) {
PMD_INIT_LOG(INFO,
"skip kernel managed virtio device.");
return 1;
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index b69785ea..dd7693fe 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -40,6 +40,7 @@
#include <rte_ethdev.h>
struct virtqueue;
+struct virtnet_ctl;
/* VirtIO PCI vendor/device ID. */
#define VIRTIO_PCI_VENDORID 0x1AF4
@@ -234,7 +235,7 @@ struct virtio_pci_ops {
uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec);
uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id);
- void (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
+ int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq);
};
@@ -242,7 +243,7 @@ struct virtio_pci_ops {
struct virtio_net_config;
struct virtio_hw {
- struct virtqueue *cvq;
+ struct virtnet_ctl *cvq;
struct rte_pci_ioport io;
uint64_t guest_features;
uint32_t max_tx_queues;
@@ -260,6 +261,7 @@ struct virtio_hw {
struct virtio_pci_common_cfg *common_cfg;
struct virtio_net_config *dev_cfg;
const struct virtio_pci_ops *vtpci_ops;
+ void *virtio_user_dev;
};
/*
@@ -293,7 +295,8 @@ vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
/*
* Function declaration from virtio_pci.c
*/
-int vtpci_init(struct rte_pci_device *, struct virtio_hw *);
+int vtpci_init(struct rte_pci_device *, struct virtio_hw *,
+ uint32_t *dev_flags);
void vtpci_reset(struct virtio_hw *);
void vtpci_reinit_complete(struct virtio_hw *);
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 447760a8..fcecc161 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -79,7 +79,7 @@ struct vring_used_elem {
struct vring_used {
uint16_t flags;
- uint16_t idx;
+ volatile uint16_t idx;
struct vring_used_elem ring[0];
};
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index ef21d8e3..a27208e3 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -193,8 +193,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
start_dp = vq->vq_ring.desc;
start_dp[idx].addr =
- (uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM
- - hw->vtnet_hdr_size);
+ MBUF_DATA_DMA_ADDR(cookie, vq->offset) - hw->vtnet_hdr_size;
start_dp[idx].len =
cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_WRITE;
@@ -209,23 +208,24 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
}
static inline void
-virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
+virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push)
{
struct vq_desc_extra *dxp;
+ struct virtqueue *vq = txvq->vq;
struct vring_desc *start_dp;
uint16_t seg_num = cookie->nb_segs;
uint16_t head_idx, idx;
- uint16_t head_size = txvq->hw->vtnet_hdr_size;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
unsigned long offs;
- head_idx = txvq->vq_desc_head_idx;
+ head_idx = vq->vq_desc_head_idx;
idx = head_idx;
- dxp = &txvq->vq_descx[idx];
+ dxp = &vq->vq_descx[idx];
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;
- start_dp = txvq->vq_ring.desc;
+ start_dp = vq->vq_ring.desc;
if (can_push) {
/* put on zero'd transmit header (no offloads) */
@@ -259,46 +259,32 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
+ offsetof(struct virtio_tx_region, tx_hdr);
start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs;
- start_dp[idx].len = txvq->hw->vtnet_hdr_size;
+ start_dp[idx].len = vq->hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_NEXT;
idx = start_dp[idx].next;
}
do {
- start_dp[idx].addr = rte_mbuf_data_dma_addr(cookie);
+ start_dp[idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset);
start_dp[idx].len = cookie->data_len;
start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
idx = start_dp[idx].next;
} while ((cookie = cookie->next) != NULL);
- start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
-
if (use_indirect)
- idx = txvq->vq_ring.desc[head_idx].next;
-
- txvq->vq_desc_head_idx = idx;
- if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- txvq->vq_desc_tail_idx = idx;
- txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
- vq_update_avail_ring(txvq, head_idx);
-}
+ idx = vq->vq_ring.desc[head_idx].next;
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
-
- return m;
+ vq->vq_desc_head_idx = idx;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+ vq_update_avail_ring(vq, head_idx);
}
static void
-virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
+virtio_dev_vring_start(struct virtqueue *vq)
{
- struct rte_mbuf *m;
- int i, nbufs, error, size = vq->vq_nentries;
+ int size = vq->vq_nentries;
struct vring *vr = &vq->vq_ring;
uint8_t *ring_mem = vq->vq_ring_virt_mem;
@@ -322,30 +308,70 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
* Disable device(host) interrupting guest
*/
virtqueue_disable_intr(vq);
+}
+
+void
+virtio_dev_cq_start(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (hw->cvq && hw->cvq->vq) {
+ virtio_dev_vring_start(hw->cvq->vq);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
+ }
+}
- /* Only rx virtqueue needs mbufs to be allocated at initialization */
- if (queue_type == VTNET_RQ) {
- if (vq->mpool == NULL)
+void
+virtio_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ /*
+ * Start receive and transmit vrings
+ * - Setup vring structure for all queues
+ * - Initialize descriptor for the rx vring
+ * - Allocate blank mbufs for each rx descriptor
+ *
+ */
+ uint16_t i;
+ uint16_t desc_idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Start rx vring. */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+ struct virtqueue *vq = rxvq->vq;
+ int error, nbufs;
+ struct rte_mbuf *m;
+
+ virtio_dev_vring_start(vq);
+ if (rxvq->mpool == NULL) {
rte_exit(EXIT_FAILURE,
- "Cannot allocate initial mbufs for rx virtqueue");
+ "Cannot allocate mbufs for rx virtqueue");
+ }
/* Allocate blank mbufs for the each rx descriptor */
nbufs = 0;
error = ENOSPC;
#ifdef RTE_MACHINE_CPUFLAG_SSSE3
- if (use_simple_rxtx)
- for (i = 0; i < vq->vq_nentries; i++) {
- vq->vq_ring.avail->ring[i] = i;
- vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
+ if (use_simple_rxtx) {
+ for (desc_idx = 0; desc_idx < vq->vq_nentries;
+ desc_idx++) {
+ vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+ vq->vq_ring.desc[desc_idx].flags =
+ VRING_DESC_F_WRITE;
}
+ }
#endif
- memset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf));
- for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++)
- vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf;
+ memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
+ for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
+ desc_idx++) {
+ vq->sw_ring[vq->vq_nentries + desc_idx] =
+ &rxvq->fake_mbuf;
+ }
while (!virtqueue_full(vq)) {
- m = rte_rxmbuf_alloc(vq->mpool);
+ m = rte_mbuf_raw_alloc(rxvq->mpool);
if (m == NULL)
break;
@@ -368,64 +394,40 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
vq_update_avail_idx(vq);
PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
- } else if (queue_type == VTNET_TQ) {
+
+ VIRTQUEUE_DUMP(vq);
+ }
+
+ /* Start tx vring. */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
+ struct virtqueue *vq = txvq->vq;
+
+ virtio_dev_vring_start(vq);
#ifdef RTE_MACHINE_CPUFLAG_SSSE3
if (use_simple_rxtx) {
- int mid_idx = vq->vq_nentries >> 1;
- for (i = 0; i < mid_idx; i++) {
- vq->vq_ring.avail->ring[i] = i + mid_idx;
- vq->vq_ring.desc[i + mid_idx].next = i;
- vq->vq_ring.desc[i + mid_idx].addr =
- vq->virtio_net_hdr_mem +
+ uint16_t mid_idx = vq->vq_nentries >> 1;
+
+ for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
+ vq->vq_ring.avail->ring[desc_idx] =
+ desc_idx + mid_idx;
+ vq->vq_ring.desc[desc_idx + mid_idx].next =
+ desc_idx;
+ vq->vq_ring.desc[desc_idx + mid_idx].addr =
+ txvq->virtio_net_hdr_mem +
offsetof(struct virtio_tx_region, tx_hdr);
- vq->vq_ring.desc[i + mid_idx].len =
+ vq->vq_ring.desc[desc_idx + mid_idx].len =
vq->hw->vtnet_hdr_size;
- vq->vq_ring.desc[i + mid_idx].flags =
+ vq->vq_ring.desc[desc_idx + mid_idx].flags =
VRING_DESC_F_NEXT;
- vq->vq_ring.desc[i].flags = 0;
+ vq->vq_ring.desc[desc_idx].flags = 0;
}
- for (i = mid_idx; i < vq->vq_nentries; i++)
- vq->vq_ring.avail->ring[i] = i;
+ for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
+ desc_idx++)
+ vq->vq_ring.avail->ring[desc_idx] = desc_idx;
}
#endif
- }
-}
-
-void
-virtio_dev_cq_start(struct rte_eth_dev *dev)
-{
- struct virtio_hw *hw = dev->data->dev_private;
-
- if (hw->cvq) {
- virtio_dev_vring_start(hw->cvq, VTNET_CQ);
- VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
- }
-}
-
-void
-virtio_dev_rxtx_start(struct rte_eth_dev *dev)
-{
- /*
- * Start receive and transmit vrings
- * - Setup vring structure for all queues
- * - Initialize descriptor for the rx vring
- * - Allocate blank mbufs for the each rx descriptor
- *
- */
- int i;
-
- PMD_INIT_FUNC_TRACE();
-
- /* Start rx vring. */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ);
- VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
- }
-
- /* Start tx vring. */
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ);
- VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+ VIRTQUEUE_DUMP(vq);
}
}
@@ -438,24 +440,24 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_mempool *mp)
{
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
- struct virtqueue *vq;
+ struct virtnet_rx *rxvq;
int ret;
PMD_INIT_FUNC_TRACE();
ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
- nb_desc, socket_id, &vq);
+ nb_desc, socket_id, (void **)&rxvq);
if (ret < 0) {
PMD_INIT_LOG(ERR, "rvq initialization failed");
return ret;
}
/* Create mempool for rx mbuf allocation */
- vq->mpool = mp;
+ rxvq->mpool = mp;
- dev->data->rx_queues[queue_idx] = vq;
+ dev->data->rx_queues[queue_idx] = rxvq;
#ifdef RTE_MACHINE_CPUFLAG_SSSE3
- virtio_rxq_vec_setup(vq);
+ virtio_rxq_vec_setup(rxvq);
#endif
return 0;
@@ -464,7 +466,16 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
void
virtio_dev_rx_queue_release(void *rxq)
{
- virtio_dev_queue_release(rxq);
+ struct virtnet_rx *rxvq = rxq;
+ struct virtqueue *vq = rxvq->vq;
+	struct virtnet_rx *rxvq = rxq;
+	struct virtqueue *vq = rxvq->vq;
+	/* rxvq is freed when vq is freed; the mz must be freed after
+	 * del_queue, so save the mz pointer first.
+	 */
+ const struct rte_memzone *mz = rxvq->mz;
+
+ /* no need to free rxq as vq and rxq are allocated together */
+ virtio_dev_queue_release(vq);
+ rte_memzone_free(mz);
}
/*
@@ -486,6 +497,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
#ifdef RTE_MACHINE_CPUFLAG_SSSE3
struct virtio_hw *hw = dev->data->dev_private;
#endif
+ struct virtnet_tx *txvq;
struct virtqueue *vq;
uint16_t tx_free_thresh;
int ret;
@@ -510,11 +522,12 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
#endif
ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
- nb_desc, socket_id, &vq);
+ nb_desc, socket_id, (void **)&txvq);
if (ret < 0) {
- PMD_INIT_LOG(ERR, "rvq initialization failed");
+ PMD_INIT_LOG(ERR, "tvq initialization failed");
return ret;
}
+ vq = txvq->vq;
tx_free_thresh = tx_conf->tx_free_thresh;
if (tx_free_thresh == 0)
@@ -532,14 +545,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
vq->vq_free_thresh = tx_free_thresh;
- dev->data->tx_queues[queue_idx] = vq;
+ dev->data->tx_queues[queue_idx] = txvq;
return 0;
}
void
virtio_dev_tx_queue_release(void *txq)
{
- virtio_dev_queue_release(txq);
+ struct virtnet_tx *txvq = txq;
+ struct virtqueue *vq = txvq->vq;
+	/* txvq is freed when vq is freed; the mz must be freed after
+	 * del_queue, so save the mz pointers first.
+	 */
+ const struct rte_memzone *hdr_mz = txvq->virtio_net_hdr_mz;
+ const struct rte_memzone *mz = txvq->mz;
+
+ virtio_dev_queue_release(vq);
+ rte_memzone_free(mz);
+ rte_memzone_free(hdr_mz);
}
static void
@@ -558,34 +581,34 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
}
static void
-virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf)
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
uint32_t s = mbuf->pkt_len;
struct ether_addr *ea;
if (s == 64) {
- vq->size_bins[1]++;
+ stats->size_bins[1]++;
} else if (s > 64 && s < 1024) {
uint32_t bin;
/* count zeros, and offset into correct bin */
bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
- vq->size_bins[bin]++;
+ stats->size_bins[bin]++;
} else {
if (s < 64)
- vq->size_bins[0]++;
+ stats->size_bins[0]++;
else if (s < 1519)
- vq->size_bins[6]++;
+ stats->size_bins[6]++;
else if (s >= 1519)
- vq->size_bins[7]++;
+ stats->size_bins[7]++;
}
ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
if (is_multicast_ether_addr(ea)) {
if (is_broadcast_ether_addr(ea))
- vq->broadcast++;
+ stats->broadcast++;
else
- vq->multicast++;
+ stats->multicast++;
}
}
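To make the bin arithmetic concrete: for a size s strictly between 64 and 1024, (sizeof(s) * 8) - __builtin_clz(s) - 5 equals floor(log2(s)) - 4, so each bin covers one power-of-two range:

    /* s = 65   : 32 - 25 - 5 = 2 -> size_bins[2] (65-127 octets)
     * s = 200  : 32 - 24 - 5 = 3 -> size_bins[3] (128-255 octets)
     * s = 1000 : 32 - 22 - 5 = 5 -> size_bins[5] (512-1023 octets)
     * matching the RFC 2819 etherStatsPkts buckets.
     */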
@@ -594,7 +617,8 @@ virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf)
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
- struct virtqueue *rxvq = rx_queue;
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw;
struct rte_mbuf *rxm, *new_mbuf;
uint16_t nb_used, num, nb_rx;
@@ -604,19 +628,19 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint32_t i, nb_enqueued;
uint32_t hdr_size;
- nb_used = VIRTQUEUE_NUSED(rxvq);
+ nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
if (likely(num > DESC_PER_CACHELINE))
- num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
- num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
+ num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
- hw = rxvq->hw;
+ hw = vq->hw;
nb_rx = 0;
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
@@ -629,8 +653,8 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
- virtio_discard_rxbuf(rxvq, rxm);
- rxvq->errors++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
continue;
}
@@ -651,23 +675,23 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_pkts[nb_rx++] = rxm;
- rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len;
- virtio_update_packet_stats(rxvq, rxm);
+ rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
+ virtio_update_packet_stats(&rxvq->stats, rxm);
}
- rxvq->packets += nb_rx;
+ rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
error = ENOSPC;
- while (likely(!virtqueue_full(rxvq))) {
- new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+ while (likely(!virtqueue_full(vq))) {
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
struct rte_eth_dev *dev
= &rte_eth_devices[rxvq->port_id];
dev->data->rx_mbuf_alloc_failed++;
break;
}
- error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
+ error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
if (unlikely(error)) {
rte_pktmbuf_free(new_mbuf);
break;
@@ -676,11 +700,11 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
if (likely(nb_enqueued)) {
- vq_update_avail_idx(rxvq);
+ vq_update_avail_idx(vq);
- if (unlikely(virtqueue_kick_prepare(rxvq))) {
- virtqueue_notify(rxvq);
- PMD_RX_LOG(DEBUG, "Notified\n");
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
}
}
@@ -692,7 +716,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct virtqueue *rxvq = rx_queue;
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
struct virtio_hw *hw;
struct rte_mbuf *rxm, *new_mbuf;
uint16_t nb_used, num, nb_rx;
@@ -706,13 +731,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint32_t seg_res;
uint32_t hdr_size;
- nb_used = VIRTQUEUE_NUSED(rxvq);
+ nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
- PMD_RX_LOG(DEBUG, "used:%d\n", nb_used);
+ PMD_RX_LOG(DEBUG, "used:%d", nb_used);
- hw = rxvq->hw;
+ hw = vq->hw;
nb_rx = 0;
i = 0;
nb_enqueued = 0;
@@ -727,22 +752,22 @@ virtio_recv_mergeable_pkts(void *rx_queue,
if (nb_rx == nb_pkts)
break;
- num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1);
+ num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
if (num != 1)
continue;
i++;
- PMD_RX_LOG(DEBUG, "dequeue:%d\n", num);
- PMD_RX_LOG(DEBUG, "packet len:%d\n", len[0]);
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);
rxm = rcv_pkts[0];
if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
- PMD_RX_LOG(ERR, "Packet drop\n");
+ PMD_RX_LOG(ERR, "Packet drop");
nb_enqueued++;
- virtio_discard_rxbuf(rxvq, rxm);
- rxvq->errors++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
continue;
}
@@ -773,18 +798,18 @@ virtio_recv_mergeable_pkts(void *rx_queue,
*/
uint16_t rcv_cnt =
RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
- if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) {
+ if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
uint32_t rx_num =
- virtqueue_dequeue_burst_rx(rxvq,
+ virtqueue_dequeue_burst_rx(vq,
rcv_pkts, len, rcv_cnt);
i += rx_num;
rcv_cnt = rx_num;
} else {
PMD_RX_LOG(ERR,
- "No enough segments for packet.\n");
+ "Not enough segments for packet.");
nb_enqueued++;
- virtio_discard_rxbuf(rxvq, rxm);
- rxvq->errors++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
break;
}
@@ -814,24 +839,24 @@ virtio_recv_mergeable_pkts(void *rx_queue,
VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
rx_pkts[nb_rx]->data_len);
- rxvq->bytes += rx_pkts[nb_rx]->pkt_len;
- virtio_update_packet_stats(rxvq, rx_pkts[nb_rx]);
+ rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;
+ virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
nb_rx++;
}
- rxvq->packets += nb_rx;
+ rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
error = ENOSPC;
- while (likely(!virtqueue_full(rxvq))) {
- new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+ while (likely(!virtqueue_full(vq))) {
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
struct rte_eth_dev *dev
= &rte_eth_devices[rxvq->port_id];
dev->data->rx_mbuf_alloc_failed++;
break;
}
- error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
+ error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
if (unlikely(error)) {
rte_pktmbuf_free(new_mbuf);
break;
@@ -840,10 +865,10 @@ virtio_recv_mergeable_pkts(void *rx_queue,
}
if (likely(nb_enqueued)) {
- vq_update_avail_idx(rxvq);
+ vq_update_avail_idx(vq);
- if (unlikely(virtqueue_kick_prepare(rxvq))) {
- virtqueue_notify(rxvq);
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
PMD_RX_LOG(DEBUG, "Notified");
}
}
@@ -854,8 +879,9 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
- struct virtqueue *txvq = tx_queue;
- struct virtio_hw *hw = txvq->hw;
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
uint16_t nb_used, nb_tx;
int error;
@@ -864,11 +890,11 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_pkts;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
- nb_used = VIRTQUEUE_NUSED(txvq);
+ nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
- if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh))
- virtio_xmit_cleanup(txvq, nb_used);
+ if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
+ virtio_xmit_cleanup(vq, nb_used);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
@@ -886,6 +912,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* optimize ring usage */
if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
txm->nb_segs == 1 &&
rte_pktmbuf_headroom(txm) >= hdr_size &&
rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
@@ -901,16 +928,16 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* default => number of segments + 1
*/
slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
- need = slots - txvq->vq_free_cnt;
+ need = slots - vq->vq_free_cnt;
/* Positive value indicates it need free vring descriptors */
if (unlikely(need > 0)) {
- nb_used = VIRTQUEUE_NUSED(txvq);
+ nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
need = RTE_MIN(need, (int)nb_used);
- virtio_xmit_cleanup(txvq, need);
- need = slots - txvq->vq_free_cnt;
+ virtio_xmit_cleanup(vq, need);
+ need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
"No free tx descriptors to transmit");
@@ -921,17 +948,17 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Enqueue Packet buffers */
virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);
- txvq->bytes += txm->pkt_len;
- virtio_update_packet_stats(txvq, txm);
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
}
- txvq->packets += nb_tx;
+ txvq->stats.packets += nb_tx;
if (likely(nb_tx)) {
- vq_update_avail_idx(txvq);
+ vq_update_avail_idx(vq);
- if (unlikely(virtqueue_kick_prepare(txvq))) {
- virtqueue_notify(txvq);
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
PMD_TX_LOG(DEBUG, "Notified backend after xmit");
}
}
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index a76c3e52..058b56a1 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -31,11 +31,65 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifndef _VIRTIO_RXTX_H_
+#define _VIRTIO_RXTX_H_
+
#define RTE_PMD_VIRTIO_RX_MAX_BURST 64
+struct virtnet_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+ uint64_t multicast;
+ uint64_t broadcast;
+ /* Size bins in array per RFC 2819: undersized [0], 64 [1], etc. */
+ uint64_t size_bins[8];
+};
+
+struct virtnet_rx {
+ struct virtqueue *vq;
+ /* dummy mbuf, for wraparound when processing RX ring. */
+ struct rte_mbuf fake_mbuf;
+ uint64_t mbuf_initializer; /**< value to init mbufs. */
+ struct rte_mempool *mpool; /**< mempool for mbuf allocation */
+
+ uint16_t queue_id; /**< DPDK queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+
+ /* Statistics */
+ struct virtnet_stats stats;
+
+ const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
+};
+
+struct virtnet_tx {
+ struct virtqueue *vq;
+ /**< memzone to populate hdr. */
+ const struct rte_memzone *virtio_net_hdr_mz;
+ phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+
+ uint16_t queue_id; /**< DPDK queue index. */
+ uint8_t port_id; /**< Device port identifier. */
+
+ /* Statistics */
+ struct virtnet_stats stats;
+
+ const struct rte_memzone *mz; /**< mem zone to populate TX ring. */
+};
+
+struct virtnet_ctl {
+ struct virtqueue *vq;
+ /**< memzone to populate hdr. */
+ const struct rte_memzone *virtio_net_hdr_mz;
+ phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ uint8_t port_id; /**< Device port identifier. */
+ const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
+};
+
#ifdef RTE_MACHINE_CPUFLAG_SSSE3
-int virtio_rxq_vec_setup(struct virtqueue *rxq);
+int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
struct rte_mbuf *m);
#endif
+#endif /* _VIRTIO_RXTX_H_ */
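To illustrate the split these structures introduce (a hedged sketch; dev and i are assumed in scope): the handle stored in dev->data->rx_queues[] is now a struct virtnet_rx rather than a bare virtqueue, and the datapath reaches the ring through its vq member:

    struct virtnet_rx *rxvq = dev->data->rx_queues[i];
    struct virtqueue *vq = rxvq->vq;           /* ring state lives here */
    uint64_t rx_packets = rxvq->stats.packets; /* per-queue stats live here */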
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 8f5293dd..242ad90d 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -80,8 +80,8 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
vq->sw_ring[desc_idx] = cookie;
start_dp = vq->vq_ring.desc;
- start_dp[desc_idx].addr = (uint64_t)((uintptr_t)cookie->buf_physaddr +
- RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size);
+ start_dp[desc_idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset) -
+ vq->hw->vtnet_hdr_size;
start_dp[desc_idx].len = cookie->buf_len -
RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
@@ -92,17 +92,18 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
}
static inline void
-virtio_rxq_rearm_vec(struct virtqueue *rxvq)
+virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
{
int i;
uint16_t desc_idx;
struct rte_mbuf **sw_ring;
struct vring_desc *start_dp;
int ret;
+ struct virtqueue *vq = rxvq->vq;
- desc_idx = rxvq->vq_avail_idx & (rxvq->vq_nentries - 1);
- sw_ring = &rxvq->sw_ring[desc_idx];
- start_dp = &rxvq->vq_ring.desc[desc_idx];
+ desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
+ sw_ring = &vq->sw_ring[desc_idx];
+ start_dp = &vq->vq_ring.desc[desc_idx];
ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
RTE_VIRTIO_VPMD_RX_REARM_THRESH);
@@ -119,15 +120,15 @@ virtio_rxq_rearm_vec(struct virtqueue *rxvq)
*(uint64_t *)p = rxvq->mbuf_initializer;
start_dp[i].addr =
- (uint64_t)((uintptr_t)sw_ring[i]->buf_physaddr +
- RTE_PKTMBUF_HEADROOM - rxvq->hw->vtnet_hdr_size);
+ MBUF_DATA_DMA_ADDR(sw_ring[i], vq->offset) -
+ vq->hw->vtnet_hdr_size;
start_dp[i].len = sw_ring[i]->buf_len -
- RTE_PKTMBUF_HEADROOM + rxvq->hw->vtnet_hdr_size;
+ RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
}
- rxvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
- rxvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
- vq_update_avail_idx(rxvq);
+ vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq_update_avail_idx(vq);
}
/* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)
@@ -143,7 +144,8 @@ uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct virtqueue *rxvq = rx_queue;
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_used_elem *rused;
@@ -175,15 +177,14 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
len_adjust = _mm_set_epi16(
0, 0,
0,
- (uint16_t)-rxvq->hw->vtnet_hdr_size,
- 0, (uint16_t)-rxvq->hw->vtnet_hdr_size,
+ (uint16_t)-vq->hw->vtnet_hdr_size,
+ 0, (uint16_t)-vq->hw->vtnet_hdr_size,
0, 0);
if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
return 0;
- nb_used = *(volatile uint16_t *)&rxvq->vq_ring.used->idx -
- rxvq->vq_used_cons_idx;
+ nb_used = VIRTQUEUE_NUSED(vq);
rte_compiler_barrier();
@@ -193,17 +194,17 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
nb_used = RTE_MIN(nb_used, nb_pkts);
- desc_idx = (uint16_t)(rxvq->vq_used_cons_idx & (rxvq->vq_nentries - 1));
- rused = &rxvq->vq_ring.used->ring[desc_idx];
- sw_ring = &rxvq->sw_ring[desc_idx];
- sw_ring_end = &rxvq->sw_ring[rxvq->vq_nentries];
+ desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ rused = &vq->vq_ring.used->ring[desc_idx];
+ sw_ring = &vq->sw_ring[desc_idx];
+ sw_ring_end = &vq->sw_ring[vq->vq_nentries];
_mm_prefetch((const void *)rused, _MM_HINT_T0);
- if (rxvq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
- if (unlikely(virtqueue_kick_prepare(rxvq)))
- virtqueue_notify(rxvq);
+ if (unlikely(virtqueue_kick_prepare(vq)))
+ virtqueue_notify(vq);
}
for (nb_pkts_received = 0;
@@ -286,9 +287,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
}
}
- rxvq->vq_used_cons_idx += nb_pkts_received;
- rxvq->vq_free_cnt += nb_pkts_received;
- rxvq->packets += nb_pkts_received;
+ vq->vq_used_cons_idx += nb_pkts_received;
+ vq->vq_free_cnt += nb_pkts_received;
+ rxvq->stats.packets += nb_pkts_received;
return nb_pkts_received;
}
@@ -342,31 +343,32 @@ uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct virtqueue *txvq = tx_queue;
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_desc *start_dp;
uint16_t nb_tail, nb_commit;
int i;
- uint16_t desc_idx_max = (txvq->vq_nentries >> 1) - 1;
+ uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
- nb_used = VIRTQUEUE_NUSED(txvq);
+ nb_used = VIRTQUEUE_NUSED(vq);
rte_compiler_barrier();
if (nb_used >= VIRTIO_TX_FREE_THRESH)
- virtio_xmit_cleanup(tx_queue);
+ virtio_xmit_cleanup(vq);
- nb_commit = nb_pkts = RTE_MIN((txvq->vq_free_cnt >> 1), nb_pkts);
- desc_idx = (uint16_t) (txvq->vq_avail_idx & desc_idx_max);
- start_dp = txvq->vq_ring.desc;
+ nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
+ desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
+ start_dp = vq->vq_ring.desc;
nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);
if (nb_commit >= nb_tail) {
for (i = 0; i < nb_tail; i++)
- txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
+ vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
for (i = 0; i < nb_tail; i++) {
start_dp[desc_idx].addr =
- rte_mbuf_data_dma_addr(*tx_pkts);
+ MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
tx_pkts++;
desc_idx++;
@@ -375,9 +377,10 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
desc_idx = 0;
}
for (i = 0; i < nb_commit; i++)
- txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
+ vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
for (i = 0; i < nb_commit; i++) {
- start_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts);
+ start_dp[desc_idx].addr =
+ MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
tx_pkts++;
desc_idx++;
@@ -385,21 +388,21 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_compiler_barrier();
- txvq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
- txvq->vq_avail_idx += nb_pkts;
- txvq->vq_ring.avail->idx = txvq->vq_avail_idx;
- txvq->packets += nb_pkts;
+ vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
+ vq->vq_avail_idx += nb_pkts;
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+ txvq->stats.packets += nb_pkts;
if (likely(nb_pkts)) {
- if (unlikely(virtqueue_kick_prepare(txvq)))
- virtqueue_notify(txvq);
+ if (unlikely(virtqueue_kick_prepare(vq)))
+ virtqueue_notify(vq);
}
return nb_pkts;
}
int __attribute__((cold))
-virtio_rxq_vec_setup(struct virtqueue *rxq)
+virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
new file mode 100644
index 00000000..7adb55f5
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/vhost.h
@@ -0,0 +1,146 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VHOST_NET_USER_H
+#define _VHOST_NET_USER_H
+
+#include <stdint.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include "../virtio_pci.h"
+#include "../virtio_logs.h"
+#include "../virtqueue.h"
+
+#define VHOST_MEMORY_MAX_NREGIONS 8
+
+struct vhost_vring_state {
+ unsigned int index;
+ unsigned int num;
+};
+
+struct vhost_vring_file {
+ unsigned int index;
+ int fd;
+};
+
+struct vhost_vring_addr {
+ unsigned int index;
+ /* Option flags. */
+ unsigned int flags;
+ /* Flag values: */
+ /* Whether log address is valid. If set enables logging. */
+#define VHOST_VRING_F_LOG 0
+
+ /* Start of array of descriptors (virtually contiguous) */
+ uint64_t desc_user_addr;
+ /* Used structure address. Must be 32 bit aligned */
+ uint64_t used_user_addr;
+ /* Available structure address. Must be 16 bit aligned */
+ uint64_t avail_user_addr;
+ /* Logging support. */
+ /* Log writes to used structure, at offset calculated from specified
+ * address. Address must be 32 bit aligned.
+ */
+ uint64_t log_guest_addr;
+};
+
+enum vhost_user_request {
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_MAX
+};
+
+struct vhost_memory_region {
+ uint64_t guest_phys_addr;
+ uint64_t memory_size; /* bytes */
+ uint64_t userspace_addr;
+ uint64_t mmap_offset;
+};
+
+struct vhost_memory {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
+};
+
+struct vhost_user_msg {
+ enum vhost_user_request request;
+
+#define VHOST_USER_VERSION_MASK 0x3
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+#define VHOST_USER_VRING_IDX_MASK 0xff
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ struct vhost_memory memory;
+ } payload;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+} __attribute((packed));
+
+#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
+#define VHOST_USER_PAYLOAD_SIZE \
+ (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)
+
+/* The version of the protocol we support */
+#define VHOST_USER_VERSION 0x1
+
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VHOST_USER_MQ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
+
+int vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg);
+int vhost_user_setup(const char *path);
+int vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable);
+
+#endif
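A sketch of the wire framing these definitions imply: with the packed layout, the header (request, flags, size) occupies VHOST_USER_HDR_SIZE = 12 bytes, and size gives the number of payload bytes that follow. A request with no payload, such as VHOST_USER_GET_FEATURES, is therefore just the header:

    struct vhost_user_msg msg = {
        .request = VHOST_USER_GET_FEATURES,
        .flags   = VHOST_USER_VERSION, /* version 0x1, reply bit clear */
        .size    = 0,                  /* no payload bytes follow */
    };
    /* send(fd, &msg, VHOST_USER_HDR_SIZE + msg.size, 0); */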
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
new file mode 100644
index 00000000..a2b0687f
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -0,0 +1,426 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/un.h>
+#include <string.h>
+#include <errno.h>
+
+#include "vhost.h"
+
+static int
+vhost_user_write(int fd, void *buf, int len, int *fds, int fd_num)
+{
+ int r;
+ struct msghdr msgh;
+ struct iovec iov;
+ size_t fd_size = fd_num * sizeof(int);
+ char control[CMSG_SPACE(fd_size)];
+ struct cmsghdr *cmsg;
+
+ memset(&msgh, 0, sizeof(msgh));
+ memset(control, 0, sizeof(control));
+
+ iov.iov_base = (uint8_t *)buf;
+ iov.iov_len = len;
+
+ msgh.msg_iov = &iov;
+ msgh.msg_iovlen = 1;
+ msgh.msg_control = control;
+ msgh.msg_controllen = sizeof(control);
+
+ cmsg = CMSG_FIRSTHDR(&msgh);
+ cmsg->cmsg_len = CMSG_LEN(fd_size);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ memcpy(CMSG_DATA(cmsg), fds, fd_size);
+
+ do {
+ r = sendmsg(fd, &msgh, 0);
+ } while (r < 0 && errno == EINTR);
+
+ return r;
+}
+
+static int
+vhost_user_read(int fd, struct vhost_user_msg *msg)
+{
+ uint32_t valid_flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
+ int ret, sz_hdr = VHOST_USER_HDR_SIZE, sz_payload;
+
+ ret = recv(fd, (void *)msg, sz_hdr, 0);
+ if (ret < sz_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to recv msg hdr: %d instead of %d.",
+ ret, sz_hdr);
+ goto fail;
+ }
+
+ /* validate msg flags */
+ if (msg->flags != (valid_flags)) {
+ PMD_DRV_LOG(ERR, "Failed to recv msg: flags %x instead of %x.",
+ msg->flags, valid_flags);
+ goto fail;
+ }
+
+ sz_payload = msg->size;
+ if (sz_payload) {
+ ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0);
+ if (ret < sz_payload) {
+ PMD_DRV_LOG(ERR,
+ "Failed to recv msg payload: %d instead of %d.",
+ ret, msg->size);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ return -1;
+}
+
+struct hugepage_file_info {
+ uint64_t addr; /**< virtual addr */
+ size_t size; /**< the file size */
+ char path[PATH_MAX]; /**< path to backing file */
+};
+
+/* Two possible options:
+ * 1. Match HUGEPAGE_INFO_FMT to find the file storing the struct
+ *    hugepage_file array. This is simple, but cannot be used in a
+ *    secondary process because the secondary process will close and
+ *    munmap that file.
+ * 2. Match HUGEFILE_FMT to find the hugepage files directly.
+ *
+ * We choose option 2.
+ */
+static int
+get_hugepage_file_info(struct hugepage_file_info huges[], int max)
+{
+ int idx;
+ FILE *f;
+ char buf[BUFSIZ], *tmp, *tail;
+ char *str_underline, *str_start;
+ int huge_index;
+ uint64_t v_start, v_end;
+
+ f = fopen("/proc/self/maps", "r");
+ if (!f) {
+ PMD_DRV_LOG(ERR, "cannot open /proc/self/maps");
+ return -1;
+ }
+
+ idx = 0;
+ while (fgets(buf, sizeof(buf), f) != NULL) {
+ if (sscanf(buf, "%" PRIx64 "-%" PRIx64, &v_start, &v_end) < 2) {
+ PMD_DRV_LOG(ERR, "Failed to parse address");
+ goto error;
+ }
+
+ tmp = strchr(buf, ' ') + 1; /** skip address */
+ tmp = strchr(tmp, ' ') + 1; /** skip perm */
+ tmp = strchr(tmp, ' ') + 1; /** skip offset */
+ tmp = strchr(tmp, ' ') + 1; /** skip dev */
+ tmp = strchr(tmp, ' ') + 1; /** skip inode */
+ while (*tmp == ' ') /** skip spaces */
+ tmp++;
+ tail = strrchr(tmp, '\n'); /** remove newline if exists */
+ if (tail)
+ *tail = '\0';
+
+ /* Match HUGEFILE_FMT, aka "%s/%smap_%d",
+ * which is defined in eal_filesystem.h
+ */
+ str_underline = strrchr(tmp, '_');
+ if (!str_underline)
+ continue;
+
+ str_start = str_underline - strlen("map");
+ if (str_start < tmp)
+ continue;
+
+ if (sscanf(str_start, "map_%d", &huge_index) != 1)
+ continue;
+
+ if (idx >= max) {
+ PMD_DRV_LOG(ERR, "Exceed maximum of %d", max);
+ goto error;
+ }
+ huges[idx].addr = v_start;
+ huges[idx].size = v_end - v_start;
+ strcpy(huges[idx].path, tmp);
+ idx++;
+ }
+
+ fclose(f);
+ return idx;
+
+error:
+ fclose(f);
+ return -1;
+}
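For reference, an illustrative /proc/self/maps line this parser accepts (all values made up; the trailing file name follows HUGEFILE_FMT):

    7f2a40000000-7f2a80000000 rw-s 00000000 00:2c 123456  /dev/hugepages/rtemap_0

v_start and v_end come from the leading address range, the perms/offset/dev/inode fields are skipped, and sscanf on the trailing "map_0" yields huge_index = 0, so the entry is recorded with addr = v_start and size = v_end - v_start.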
+
+static int
+prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
+{
+ int i, num;
+ struct hugepage_file_info huges[VHOST_MEMORY_MAX_NREGIONS];
+ struct vhost_memory_region *mr;
+
+ num = get_hugepage_file_info(huges, VHOST_MEMORY_MAX_NREGIONS);
+ if (num < 0) {
+ PMD_INIT_LOG(ERR, "Failed to prepare memory for vhost-user");
+ return -1;
+ }
+
+ for (i = 0; i < num; ++i) {
+ mr = &msg->payload.memory.regions[i];
+ mr->guest_phys_addr = huges[i].addr; /* use vaddr! */
+ mr->userspace_addr = huges[i].addr;
+ mr->memory_size = huges[i].size;
+ mr->mmap_offset = 0;
+ fds[i] = open(huges[i].path, O_RDWR);
+ }
+
+ msg->payload.memory.nregions = num;
+ msg->payload.memory.padding = 0;
+
+ return 0;
+}
+
+static struct vhost_user_msg m;
+
+static const char * const vhost_msg_strings[] = {
+ [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
+ [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
+ [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
+ [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
+ [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
+ [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
+ [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
+ [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
+ [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
+ [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
+ [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
+ [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
+ NULL,
+};
+
+int
+vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg)
+{
+ struct vhost_user_msg msg;
+ struct vhost_vring_file *file = 0;
+ int need_reply = 0;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ int fd_num = 0;
+ int i, len;
+
+ RTE_SET_USED(m);
+ RTE_SET_USED(vhost_msg_strings);
+
+ PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
+
+ msg.request = req;
+ msg.flags = VHOST_USER_VERSION;
+ msg.size = 0;
+
+ switch (req) {
+ case VHOST_USER_GET_FEATURES:
+ need_reply = 1;
+ break;
+
+ case VHOST_USER_SET_FEATURES:
+ case VHOST_USER_SET_LOG_BASE:
+ msg.payload.u64 = *((__u64 *)arg);
+ msg.size = sizeof(m.payload.u64);
+ break;
+
+ case VHOST_USER_SET_OWNER:
+ case VHOST_USER_RESET_OWNER:
+ break;
+
+ case VHOST_USER_SET_MEM_TABLE:
+ if (prepare_vhost_memory_user(&msg, fds) < 0)
+ return -1;
+ fd_num = msg.payload.memory.nregions;
+ msg.size = sizeof(m.payload.memory.nregions);
+ msg.size += sizeof(m.payload.memory.padding);
+ msg.size += fd_num * sizeof(struct vhost_memory_region);
+ break;
+
+ case VHOST_USER_SET_LOG_FD:
+ fds[fd_num++] = *((int *)arg);
+ break;
+
+ case VHOST_USER_SET_VRING_NUM:
+ case VHOST_USER_SET_VRING_BASE:
+ case VHOST_USER_SET_VRING_ENABLE:
+ memcpy(&msg.payload.state, arg, sizeof(msg.payload.state));
+ msg.size = sizeof(m.payload.state);
+ break;
+
+ case VHOST_USER_GET_VRING_BASE:
+ memcpy(&msg.payload.state, arg, sizeof(msg.payload.state));
+ msg.size = sizeof(m.payload.state);
+ need_reply = 1;
+ break;
+
+ case VHOST_USER_SET_VRING_ADDR:
+ memcpy(&msg.payload.addr, arg, sizeof(msg.payload.addr));
+ msg.size = sizeof(m.payload.addr);
+ break;
+
+ case VHOST_USER_SET_VRING_KICK:
+ case VHOST_USER_SET_VRING_CALL:
+ case VHOST_USER_SET_VRING_ERR:
+ file = arg;
+ msg.payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
+ msg.size = sizeof(m.payload.u64);
+ if (file->fd > 0)
+ fds[fd_num++] = file->fd;
+ else
+ msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "trying to send unhandled msg type");
+ return -1;
+ }
+
+ len = VHOST_USER_HDR_SIZE + msg.size;
+ if (vhost_user_write(vhostfd, &msg, len, fds, fd_num) < 0) {
+ PMD_DRV_LOG(ERR, "%s failed: %s",
+ vhost_msg_strings[req], strerror(errno));
+ return -1;
+ }
+
+ if (req == VHOST_USER_SET_MEM_TABLE)
+ for (i = 0; i < fd_num; ++i)
+ close(fds[i]);
+
+ if (need_reply) {
+ if (vhost_user_read(vhostfd, &msg) < 0) {
+ PMD_DRV_LOG(ERR, "Received msg failed: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ if (req != msg.request) {
+ PMD_DRV_LOG(ERR, "Received unexpected msg type");
+ return -1;
+ }
+
+ switch (req) {
+ case VHOST_USER_GET_FEATURES:
+ if (msg.size != sizeof(m.payload.u64)) {
+ PMD_DRV_LOG(ERR, "Received bad msg size");
+ return -1;
+ }
+ *((__u64 *)arg) = msg.payload.u64;
+ break;
+ case VHOST_USER_GET_VRING_BASE:
+ if (msg.size != sizeof(m.payload.state)) {
+ PMD_DRV_LOG(ERR, "Received bad msg size");
+ return -1;
+ }
+ memcpy(arg, &msg.payload.state,
+ sizeof(struct vhost_vring_state));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Received unexpected msg type");
+ return -1;
+ }
+ }
+
+ return 0;
+}
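A minimal usage sketch (vhostfd as returned by vhost_user_setup() below):

    uint64_t features;

    if (vhost_user_sock(vhostfd, VHOST_USER_GET_FEATURES, &features) < 0)
        return -1; /* request or reply failed */
    /* features now holds the backend's advertised feature bits */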
+
+/**
+ * Set up environment to talk with a vhost user backend.
+ * @param path
+ * - The path to vhost user unix socket file.
+ *
+ * @return
+ * - (-1) if fail to set up;
+ * - (>=0) if successful, and it is the fd to vhostfd.
+ */
+int
+vhost_user_setup(const char *path)
+{
+ int fd;
+ int flag;
+ struct sockaddr_un un;
+
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0) {
+ PMD_DRV_LOG(ERR, "socket() error, %s", strerror(errno));
+ return -1;
+ }
+
+ flag = fcntl(fd, F_GETFD);
+ fcntl(fd, F_SETFD, flag | FD_CLOEXEC);
+
+ memset(&un, 0, sizeof(un));
+ un.sun_family = AF_UNIX;
+ snprintf(un.sun_path, sizeof(un.sun_path), "%s", path);
+ if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+ PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+int
+vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable)
+{
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ struct vhost_vring_state state = {
+ .index = pair_idx * 2 + i,
+ .num = enable,
+ };
+
+ if (vhost_user_sock(vhostfd,
+ VHOST_USER_SET_VRING_ENABLE, &state))
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
new file mode 100644
index 00000000..3d12a320
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -0,0 +1,333 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/eventfd.h>
+
+#include "vhost.h"
+#include "virtio_user_dev.h"
+#include "../virtio_ethdev.h"
+
+static int
+virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+ int callfd, kickfd;
+ struct vhost_vring_file file;
+ struct vhost_vring_state state;
+ struct vring *vring = &dev->vrings[queue_sel];
+ struct vhost_vring_addr addr = {
+ .index = queue_sel,
+ .desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
+ .avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
+ .used_user_addr = (uint64_t)(uintptr_t)vring->used,
+ .log_guest_addr = 0,
+ .flags = 0, /* disable log */
+ };
+
+	/* We could pass an invalid fd, but some backends use kickfd and
+	 * callfd as criteria to judge whether the device is alive, so we
+	 * use real eventfds in the end.
+	 */
+ callfd = eventfd(0, O_CLOEXEC | O_NONBLOCK);
+ if (callfd < 0) {
+ PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
+ return -1;
+ }
+ kickfd = eventfd(0, O_CLOEXEC | O_NONBLOCK);
+ if (kickfd < 0) {
+ close(callfd);
+ PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
+ return -1;
+ }
+
+	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_CALL
+	 * comes first, because vhost depends on this message to allocate the
+	 * virtqueue pair.
+	 */
+ file.index = queue_sel;
+ file.fd = callfd;
+ vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_CALL, &file);
+ dev->callfds[queue_sel] = callfd;
+
+ state.index = queue_sel;
+ state.num = vring->num;
+ vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_NUM, &state);
+
+ state.num = 0; /* no reservation */
+ vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_BASE, &state);
+
+ vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_ADDR, &addr);
+
+	/* Of all per-virtqueue messages, make sure VHOST_USER_SET_VRING_KICK
+	 * comes last, because vhost depends on this message to judge whether
+	 * virtio is ready.
+	 */
+ file.fd = kickfd;
+ vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_KICK, &file);
+ dev->kickfds[queue_sel] = kickfd;
+
+ return 0;
+}
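Once registered this way, the kick side of the pair is typically driven by writing an 8-byte counter increment to the eventfd; a hypothetical notify helper under that assumption:

    static void
    virtio_user_notify(struct virtio_user_dev *dev, uint32_t queue_sel)
    {
        uint64_t buf = 1;

        if (write(dev->kickfds[queue_sel], &buf, sizeof(buf)) < 0)
            PMD_DRV_LOG(ERR, "kick failed: %s", strerror(errno));
    }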
+
+int
+virtio_user_start_device(struct virtio_user_dev *dev)
+{
+ uint64_t features;
+ uint32_t i, queue_sel;
+ int ret;
+
+ /* construct memory region inside each implementation */
+ ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL);
+ if (ret < 0)
+ goto error;
+
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
+ if (virtio_user_kick_queue(dev, queue_sel) < 0) {
+ PMD_DRV_LOG(INFO, "kick rx vq fails: %u", i);
+ goto error;
+ }
+ }
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
+ if (virtio_user_kick_queue(dev, queue_sel) < 0) {
+ PMD_DRV_LOG(INFO, "kick tx vq fails: %u", i);
+ goto error;
+ }
+ }
+
+	/* After setting up all virtqueues, send set_features so that these
+	 * features are applied to each virtqueue on the vhost side. Before
+	 * that, make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is
+	 * enabled, and VIRTIO_NET_F_MAC is stripped.
+	 */
+ features = dev->features;
+ if (dev->max_queue_pairs > 1)
+ features |= VHOST_USER_MQ;
+ features &= ~(1ull << VIRTIO_NET_F_MAC);
+ ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features);
+ if (ret < 0)
+ goto error;
+ PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
+
+ return 0;
+error:
+ /* TODO: free resources here, or require the caller to clean up */
+ return -1;
+}
+
+int virtio_user_stop_device(struct virtio_user_dev *dev)
+{
+ return vhost_user_sock(dev->vhostfd, VHOST_USER_RESET_OWNER, NULL);
+}
+
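+/* Parse a MAC address given as "xx:xx:xx:xx:xx:xx" (hex); on a malformed
+ * string, mac_specified stays 0 and a random address is generated later.
+ */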
+static inline void
+parse_mac(struct virtio_user_dev *dev, const char *mac)
+{
+ int i, r;
+ uint32_t tmp[ETHER_ADDR_LEN];
+
+ if (!mac)
+ return;
+
+ r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
+ &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
+ if (r == ETHER_ADDR_LEN) {
+ for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ dev->mac_addr[i] = (uint8_t)tmp[i];
+ dev->mac_specified = 1;
+ } else {
+ /* ignore a malformed mac; a random mac will be used instead */
+ PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
+ }
+}
+
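+/* Initialize device state and perform the initial vhost-user handshake:
+ * connect to the backend (SET_OWNER), query its feature set
+ * (GET_FEATURES), then adjust the feature bits according to the
+ * requested configuration.
+ */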
+int
+virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
+ int cq, int queue_size, const char *mac)
+{
+ strncpy(dev->path, path, PATH_MAX - 1);
+ dev->path[PATH_MAX - 1] = '\0';
+ dev->max_queue_pairs = queues;
+ dev->queue_pairs = 1; /* mq disabled by default */
+ dev->queue_size = queue_size;
+ dev->mac_specified = 0;
+ parse_mac(dev, mac);
+ dev->vhostfd = -1;
+
+ dev->vhostfd = vhost_user_setup(dev->path);
+ if (dev->vhostfd < 0) {
+ PMD_INIT_LOG(ERR, "backend set up fails");
+ return -1;
+ }
+ if (vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL) < 0) {
+ PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
+ return -1;
+ }
+
+ if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES,
+ &dev->features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
+ return -1;
+ }
+ if (dev->mac_specified)
+ dev->features |= (1ull << VIRTIO_NET_F_MAC);
+
+ if (!cq) {
+ dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+ /* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
+ dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
+ dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
+ dev->features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+ dev->features &= ~(1ull << VIRTIO_NET_F_MQ);
+ dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ } else {
+ /* The vhost-user backend does not need to know about the ctrl-q,
+ * so strictly speaking we should OR this bit into the features
+ * ourselves. However, DPDK vhost-user does advertise this bit, so
+ * for now we check for it instead of ORing it in.
+ */
+ if (!(dev->features & (1ull << VIRTIO_NET_F_CTRL_VQ)))
+ PMD_INIT_LOG(INFO, "vhost does not support ctrl-q");
+ }
+
+ if (dev->max_queue_pairs > 1) {
+ if (!(dev->features & VHOST_USER_MQ)) {
+ PMD_INIT_LOG(ERR, "MQ not supported by the backend");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+void
+virtio_user_dev_uninit(struct virtio_user_dev *dev)
+{
+ uint32_t i;
+
+ for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+ close(dev->callfds[i]);
+ close(dev->kickfds[i]);
+ }
+
+ close(dev->vhostfd);
+}
+
+static uint8_t
+virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
+{
+ uint16_t i;
+ uint8_t ret = 0;
+
+ if (q_pairs > dev->max_queue_pairs) {
+ PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
+ q_pairs, dev->max_queue_pairs);
+ return -1;
+ }
+
+ for (i = 0; i < q_pairs; ++i)
+ ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 1);
+ for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+ ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+
+ dev->queue_pairs = q_pairs;
+
+ return ret;
+}
+
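+/* Handle one control-queue command. The descriptor chain is expected to
+ * be: idx_hdr (struct virtio_net_ctrl_hdr), one or more data descriptors,
+ * and a final one-byte status descriptor that the ack is written into.
+ * Returns the number of descriptors consumed.
+ */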
+static uint32_t
+virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
+ uint16_t idx_hdr)
+{
+ struct virtio_net_ctrl_hdr *hdr;
+ virtio_net_ctrl_ack status = ~0;
+ uint16_t i, idx_data, idx_status;
+ uint32_t n_descs = 0;
+
+ /* locate desc for header, data, and status */
+ idx_data = vring->desc[idx_hdr].next;
+ n_descs++;
+
+ i = idx_data;
+ while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
+ i = vring->desc[i].next;
+ n_descs++;
+ }
+
+ /* locate desc for status */
+ idx_status = i;
+ n_descs++;
+
+ hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
+ if (hdr->class == VIRTIO_NET_CTRL_MQ &&
+ hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ uint16_t queues;
+
+ queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
+ status = virtio_user_handle_mq(dev, queues);
+ }
+
+ /* Update status */
+ *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
+
+ return n_descs;
+}
+
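+/* Drain the control virtqueue in place: virtio-user emulates the control
+ * queue itself rather than forwarding it to the backend, so each avail
+ * entry is processed here and immediately marked as used.
+ */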
+void
+virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
+{
+ uint16_t avail_idx, desc_idx;
+ struct vring_used_elem *uep;
+ uint32_t n_descs;
+ struct vring *vring = &dev->vrings[queue_idx];
+
+ /* Consume the avail ring, starting from the used ring idx */
+ while (vring->used->idx != vring->avail->idx) {
+ avail_idx = (vring->used->idx) & (vring->num - 1);
+ desc_idx = vring->avail->ring[avail_idx];
+
+ n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);
+
+ /* Update used ring */
+ uep = &vring->used->ring[avail_idx];
+ uep->id = avail_idx;
+ uep->len = n_descs;
+
+ vring->used->idx++;
+ }
+}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
new file mode 100644
index 00000000..33690b5c
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTIO_USER_DEV_H
+#define _VIRTIO_USER_DEV_H
+
+#include <limits.h>
+#include "../virtio_pci.h"
+#include "../virtio_ring.h"
+
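+/* Per-device state of a virtio-user port. The fd and vring arrays are
+ * sized for one rx and one tx queue per queue pair plus one control
+ * queue, hence VIRTIO_MAX_VIRTQUEUES * 2 + 1.
+ */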
+struct virtio_user_dev {
+ int vhostfd;
+ int callfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+ int kickfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+ int mac_specified;
+ uint32_t max_queue_pairs;
+ uint32_t queue_pairs;
+ uint32_t queue_size;
+ uint64_t features;
+ uint8_t status;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ char path[PATH_MAX];
+ struct vring vrings[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+};
+
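+/* Typical call order (as wired up in virtio_user_ethdev.c):
+ * virtio_user_dev_init() at vdev probe time, virtio_user_start_device()
+ * when the driver sets DRIVER_OK, virtio_user_stop_device() on reset,
+ * and virtio_user_dev_uninit() at detach.
+ */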
+int virtio_user_start_device(struct virtio_user_dev *dev);
+int virtio_user_stop_device(struct virtio_user_dev *dev);
+int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
+ int cq, int queue_size, const char *mac);
+void virtio_user_dev_uninit(struct virtio_user_dev *dev);
+void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
+#endif
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
new file mode 100644
index 00000000..5ab24711
--- /dev/null
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -0,0 +1,440 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+
+#include "virtio_ethdev.h"
+#include "virtio_logs.h"
+#include "virtio_pci.h"
+#include "virtqueue.h"
+#include "virtio_rxtx.h"
+#include "virtio_user/virtio_user_dev.h"
+
+#define virtio_user_get_dev(hw) \
+ ((struct virtio_user_dev *)(hw)->virtio_user_dev)
+
+static void
+virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (offset == offsetof(struct virtio_net_config, mac) &&
+ length == ETHER_ADDR_LEN) {
+ for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ ((uint8_t *)dst)[i] = dev->mac_addr[i];
+ return;
+ }
+
+ if (offset == offsetof(struct virtio_net_config, status))
+ *(uint16_t *)dst = dev->status;
+
+ if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
+ *(uint16_t *)dst = dev->max_queue_pairs;
+}
+
+static void
+virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if ((offset == offsetof(struct virtio_net_config, mac)) &&
+ (length == ETHER_ADDR_LEN))
+ for (i = 0; i < ETHER_ADDR_LEN; ++i)
+ dev->mac_addr[i] = ((const uint8_t *)src)[i];
+ else
+ PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d\n",
+ offset, length);
+}
+
+static void
+virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+ virtio_user_start_device(dev);
+ dev->status = status;
+}
+
+static void
+virtio_user_reset(struct virtio_hw *hw)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ virtio_user_stop_device(dev);
+}
+
+static uint8_t
+virtio_user_get_status(struct virtio_hw *hw)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ return dev->status;
+}
+
+static uint64_t
+virtio_user_get_features(struct virtio_hw *hw)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ return dev->features;
+}
+
+static void
+virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ dev->features = features;
+}
+
+static uint8_t
+virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
+{
+ /* When a config interrupt occurs, the driver calls this function to
+ * query what kind of change happened. Interrupt mode is not supported
+ * for now.
+ */
+ return 0;
+}
+
+static uint16_t
+virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
+ uint16_t vec __rte_unused)
+{
+ return VIRTIO_MSI_NO_VECTOR;
+}
+
+/* Get the queue size, i.e. the number of descriptors, of the specified
+ * queue. This differs from VHOST_USER_GET_QUEUE_NUM, which is used to get
+ * the maximum number of supported queues.
+ */
+static uint16_t
+virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ /* Currently, every queue has the same queue size */
+ return dev->queue_size;
+}
+
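+/* Record the vring addresses of this queue for later use in the
+ * SET_VRING_ADDR message. The ring follows the virtio spec layout: the
+ * descriptor table first, the avail ring right after it, and the used
+ * ring next, aligned to VIRTIO_PCI_VRING_ALIGN.
+ */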
+static int
+virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+ uint16_t queue_idx = vq->vq_queue_index;
+ uint64_t desc_addr, avail_addr, used_addr;
+
+ desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ dev->vrings[queue_idx].num = vq->vq_nentries;
+ dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
+ dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
+ dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
+
+ return 0;
+}
+
+static void
+virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ /* For legacy devices, writing 0 to the VIRTIO_PCI_QUEUE_PFN port makes
+ * QEMU stop the corresponding ioeventfds and reset the device status.
+ * For modern devices, the queue desc, avail and used addresses in the
+ * PCI bar are set to 0, with no further behavior observed in QEMU.
+ *
+ * Here we only care about what information to deliver to vhost-user or
+ * vhost-kernel, so we just close the ioeventfds for now.
+ */
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ close(dev->callfds[vq->vq_queue_index]);
+ close(dev->kickfds[vq->vq_queue_index]);
+}
+
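+/* Notify the other end that new buffers are available. The control queue
+ * is handled locally (see virtio_user_handle_cq); data queues are kicked
+ * by writing to the queue's eventfd.
+ */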
+static void
+virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ uint64_t buf = 1;
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (hw->cvq && (hw->cvq->vq == vq)) {
+ virtio_user_handle_cq(dev, vq->vq_queue_index);
+ return;
+ }
+
+ if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
+ PMD_DRV_LOG(ERR, "failed to kick backend: %s\n",
+ strerror(errno));
+}
+
+static const struct virtio_pci_ops virtio_user_ops = {
+ .read_dev_cfg = virtio_user_read_dev_config,
+ .write_dev_cfg = virtio_user_write_dev_config,
+ .reset = virtio_user_reset,
+ .get_status = virtio_user_get_status,
+ .set_status = virtio_user_set_status,
+ .get_features = virtio_user_get_features,
+ .set_features = virtio_user_set_features,
+ .get_isr = virtio_user_get_isr,
+ .set_config_irq = virtio_user_set_config_irq,
+ .get_queue_num = virtio_user_get_queue_num,
+ .setup_queue = virtio_user_setup_queue,
+ .del_queue = virtio_user_del_queue,
+ .notify_queue = virtio_user_notify_queue,
+};
+
+static const char *valid_args[] = {
+#define VIRTIO_USER_ARG_QUEUES_NUM "queues"
+ VIRTIO_USER_ARG_QUEUES_NUM,
+#define VIRTIO_USER_ARG_CQ_NUM "cq"
+ VIRTIO_USER_ARG_CQ_NUM,
+#define VIRTIO_USER_ARG_MAC "mac"
+ VIRTIO_USER_ARG_MAC,
+#define VIRTIO_USER_ARG_PATH "path"
+ VIRTIO_USER_ARG_PATH,
+#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
+ VIRTIO_USER_ARG_QUEUE_SIZE,
+ NULL
+};
+
+#define VIRTIO_USER_DEF_CQ_EN 0
+#define VIRTIO_USER_DEF_Q_NUM 1
+#define VIRTIO_USER_DEF_Q_SZ 256
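+
+/* Illustrative usage (hypothetical socket path): a port can be created at
+ * EAL init time with a vdev argument such as
+ * --vdev=virtio-user0,path=/tmp/vhost-user.sock,queues=2,queue_size=256
+ * where only "path" is mandatory; the other args fall back to the
+ * defaults above.
+ */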
+
+static int
+get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ return 0;
+}
+
+static int
+get_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint64_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+static struct rte_eth_dev *
+virtio_user_eth_dev_alloc(const char *name)
+{
+ struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev_data *data;
+ struct virtio_hw *hw;
+ struct virtio_user_dev *dev;
+
+ eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
+ return NULL;
+ }
+
+ data = eth_dev->data;
+
+ hw = rte_zmalloc(NULL, sizeof(*hw), 0);
+ if (!hw) {
+ PMD_INIT_LOG(ERR, "malloc virtio_hw failed");
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+
+ dev = rte_zmalloc(NULL, sizeof(*dev), 0);
+ if (!dev) {
+ PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
+ rte_eth_dev_release_port(eth_dev);
+ rte_free(hw);
+ return NULL;
+ }
+
+ hw->vtpci_ops = &virtio_user_ops;
+ hw->use_msix = 0;
+ hw->modern = 0;
+ hw->virtio_user_dev = dev;
+ data->dev_private = hw;
+ data->numa_node = SOCKET_ID_ANY;
+ data->kdrv = RTE_KDRV_NONE;
+ data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+ eth_dev->pci_dev = NULL;
+ eth_dev->driver = NULL;
+ return eth_dev;
+}
+
+/* Dev initialization routine. Invoked once for each virtio vdev at
+ * EAL init time, see rte_eal_dev_init().
+ * Returns 0 on success.
+ */
+static int
+virtio_user_pmd_devinit(const char *name, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ struct rte_eth_dev *eth_dev;
+ struct virtio_hw *hw;
+ uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
+ uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
+ uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
+ char *path = NULL;
+ char *mac_addr = NULL;
+ int ret = -1;
+
+ if (!params || params[0] == '\0') {
+ PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio-user",
+ VIRTIO_USER_ARG_QUEUE_SIZE);
+ goto end;
+ }
+
+ kvlist = rte_kvargs_parse(params, valid_args);
+ if (!kvlist) {
+ PMD_INIT_LOG(ERR, "error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1)
+ rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
+ &get_string_arg, &path);
+ else {
+ PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio-user\n",
+ VIRTIO_USER_ARG_QUEUE_SIZE);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1)
+ rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
+ &get_string_arg, &mac_addr);
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1)
+ rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
+ &get_integer_arg, &queue_size);
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1)
+ rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
+ &get_integer_arg, &queues);
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1)
+ rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
+ &get_integer_arg, &cq);
+ else if (queues > 1)
+ cq = 1;
+
+ if (queues > 1 && cq == 0) {
+ PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
+ goto end;
+ }
+
+ eth_dev = virtio_user_eth_dev_alloc(name);
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "virtio-user fails to alloc device");
+ goto end;
+ }
+
+ hw = eth_dev->data->dev_private;
+ if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
+ queue_size, mac_addr) < 0)
+ goto end;
+
+ /* previously called by rte_eal_pci_probe() for physical dev */
+ if (eth_virtio_dev_init(eth_dev) < 0) {
+ PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
+ goto end;
+ }
+ ret = 0;
+
+end:
+ if (path)
+ free(path);
+ if (mac_addr)
+ free(mac_addr);
+ return ret;
+}
+
+/** Called by rte_eth_dev_detach() */
+static int
+virtio_user_pmd_devuninit(const char *name)
+{
+ struct rte_eth_dev *eth_dev;
+ struct virtio_hw *hw;
+ struct virtio_user_dev *dev;
+
+ if (!name)
+ return -EINVAL;
+
+ PMD_DRV_LOG(INFO, "Un-Initializing %s\n", name);
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev)
+ return -ENODEV;
+
+ /* make sure the device is stopped, queues freed */
+ rte_eth_dev_close(eth_dev->data->port_id);
+
+ hw = eth_dev->data->dev_private;
+ dev = hw->virtio_user_dev;
+ virtio_user_dev_uninit(dev);
+
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data);
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_driver virtio_user_driver = {
+ .name = "virtio-user",
+ .type = PMD_VDEV,
+ .init = virtio_user_pmd_devinit,
+ .uninit = virtio_user_pmd_devuninit,
+};
+
+PMD_REGISTER_DRIVER(virtio_user_driver);
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 4e9239e0..455aaafe 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -66,6 +66,14 @@ struct rte_mbuf;
#define VIRTQUEUE_MAX_NAME_SZ 32
+#ifdef RTE_VIRTIO_USER
+#define MBUF_DATA_DMA_ADDR(mb, offset) \
+ ((uint64_t)((uintptr_t)(*(void **)((uintptr_t)mb + offset)) \
+ + (mb)->data_off))
+#else /* RTE_VIRTIO_USER */
+#define MBUF_DATA_DMA_ADDR(mb, offset) rte_mbuf_data_dma_addr(mb)
+#endif /* RTE_VIRTIO_USER */
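+/* Note: with RTE_VIRTIO_USER the ring is accessed in-process, so the
+ * "DMA" address is really a virtual address fetched from the mbuf at the
+ * queue-specific offset; otherwise the physical address is used.
+ */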
+
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2
@@ -153,23 +161,30 @@ struct virtio_pmd_ctrl {
uint8_t data[VIRTIO_MAX_CTRL_DATA];
};
+struct vq_desc_extra {
+ void *cookie;
+ uint16_t ndescs;
+};
+
struct virtqueue {
- struct virtio_hw *hw; /**< virtio_hw structure pointer. */
- const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
- const struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. */
- struct rte_mempool *mpool; /**< mempool for mbuf allocation */
- uint16_t queue_id; /**< DPDK queue index. */
- uint8_t port_id; /**< Device port identifier. */
- uint16_t vq_queue_index; /**< PCI queue index */
-
- void *vq_ring_virt_mem; /**< linear address of vring*/
+ struct virtio_hw *hw; /**< virtio_hw structure pointer. */
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_nentries; /**< vring desc numbers */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_avail_idx; /**< sync until needed */
+ uint16_t vq_free_thresh; /**< free threshold */
+
+ void *vq_ring_virt_mem; /**< linear address of vring*/
unsigned int vq_ring_size;
- phys_addr_t vq_ring_mem; /**< physical address of vring */
- struct vring vq_ring; /**< vring keeping desc, used and avail */
- uint16_t vq_free_cnt; /**< num of desc available */
- uint16_t vq_nentries; /**< vring desc numbers */
- uint16_t vq_free_thresh; /**< free threshold */
+ phys_addr_t vq_ring_mem; /**< physical address of vring */
+ /**< use virtual address for virtio-user. */
+
/**
* Head of the free chain in the descriptor table. If
* there are no free descriptors, this will be set to
@@ -177,34 +192,12 @@ struct virtqueue {
*/
uint16_t vq_desc_head_idx;
uint16_t vq_desc_tail_idx;
- /**
- * Last consumed descriptor in the used table,
- * trails vq_ring.used->idx.
- */
- uint16_t vq_used_cons_idx;
- uint16_t vq_avail_idx;
- uint64_t mbuf_initializer; /**< value to init mbufs. */
- phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
-
- struct rte_mbuf **sw_ring; /**< RX software ring. */
- /* dummy mbuf, for wraparound when processing RX ring. */
- struct rte_mbuf fake_mbuf;
-
- /* Statistics */
- uint64_t packets;
- uint64_t bytes;
- uint64_t errors;
- uint64_t multicast;
- uint64_t broadcast;
- /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
- uint64_t size_bins[8];
-
- uint16_t *notify_addr;
-
- struct vq_desc_extra {
- void *cookie;
- uint16_t ndescs;
- } vq_descx[0];
+ uint16_t vq_queue_index; /**< PCI queue index */
+ uint16_t offset; /**< relative offset to obtain addr in mbuf */
+ uint16_t *notify_addr;
+ int configured;
+ struct rte_mbuf **sw_ring; /**< RX software ring. */
+ struct vq_desc_extra vq_descx[0];
};
/* If multiqueue is provided by host, then we support it. */
@@ -302,7 +295,8 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
* descriptor.
*/
avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
- vq->vq_ring.avail->ring[avail_idx] = desc_idx;
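+ /* Write the slot only when the value changes; this likely avoids
+ * needlessly dirtying the cache line when descriptors are reused
+ * in the same order.
+ */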
+ if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
vq->vq_avail_idx++;
}
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 4cf3b33b..23ff1da2 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -39,13 +39,13 @@ LIB = librte_pmd_vmxnet3_uio.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-ifeq ($(CC), icc)
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
-else ifeq ($(CC), clang)
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
# CFLAGS for clang
#
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index bd7a2bb7..29b469cc 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -694,7 +694,6 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->q_errors[i] = rxStats->pktsRxError;
stats->ierrors += rxStats->pktsRxError;
- stats->imcasts += rxStats->mcastPktsRxOK;
stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
}
}
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 4f9d0bd2..1be833ab 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -34,14 +34,6 @@
#ifndef _VMXNET3_ETHDEV_H_
#define _VMXNET3_ETHDEV_H_
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-#define VMXNET3_ASSERT(x) do { \
- if (!(x)) rte_panic("VMXNET3: %s\n", #x); \
-} while(0)
-#else
-#define VMXNET3_ASSERT(x) do { (void)(x); } while (0)
-#endif
-
#define VMXNET3_MAX_MAC_ADDRS 1
/* UPT feature to negotiate */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 4ac0456c..9deeb3ff 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -86,16 +86,6 @@ static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
#endif
-static struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void
vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
@@ -106,7 +96,7 @@ vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
return;
PMD_RX_LOG(DEBUG,
- "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.",
+ "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
PMD_RX_LOG(DEBUG,
"RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
@@ -136,7 +126,7 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
if (txq == NULL)
return;
- PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p data ring base : 0x%p.",
+ PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
(unsigned long)txq->cmd_ring.basePA,
@@ -296,7 +286,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
struct rte_mbuf *mbuf;
/* Release cmd_ring descriptor and free mbuf */
- VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
+ RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
mbuf = txq->cmd_ring.buf_info[eop_idx].m;
if (mbuf == NULL)
@@ -307,7 +297,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
while (txq->cmd_ring.next2comp != eop_idx) {
/* no out-of-order completion */
- VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
+ RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
completed++;
}
@@ -454,7 +444,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (tso) {
uint16_t mss = txm->tso_segsz;
- VMXNET3_ASSERT(mss > 0);
+ RTE_ASSERT(mss > 0);
gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
gdesc->txd.om = VMXNET3_OM_TSO;
@@ -544,7 +534,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
/* Allocate blank mbuf for the current Rx Descriptor */
- mbuf = rte_rxmbuf_alloc(rxq->mp);
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(mbuf == NULL)) {
PMD_RX_LOG(ERR, "Error allocating mbuf");
rxq->stats.rx_buf_alloc_failure++;
@@ -587,12 +577,6 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
static void
vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
{
- /* Check for hardware stripped VLAN tag */
- if (rcd->ts) {
- rxm->ol_flags |= PKT_RX_VLAN_PKT;
- rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
- }
-
/* Check for RSS */
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
rxm->ol_flags |= PKT_RX_RSS_HASH;
@@ -658,12 +642,13 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
idx = rcd->rxdIdx;
ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
+ RTE_SET_USED(rxd); /* used only for assert when enabled */
rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
- VMXNET3_ASSERT(rcd->len <= rxd->len);
- VMXNET3_ASSERT(rbi->m);
+ RTE_ASSERT(rcd->len <= rxd->len);
+ RTE_ASSERT(rbi->m);
/* Get the packet buffer pointer from buf_info */
rxm = rbi->m;
@@ -710,10 +695,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
* the last mbuf of the current packet.
*/
if (rcd->sop) {
- VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
+ RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
if (unlikely(rcd->len == 0)) {
- VMXNET3_ASSERT(rcd->eop);
+ RTE_ASSERT(rcd->eop);
PMD_RX_LOG(DEBUG,
"Rx buf was skipped. rxring[%d][%d])",
@@ -727,7 +712,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
} else {
struct rte_mbuf *start = rxq->start_seg;
- VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
+ RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
start->pkt_len += rxm->data_len;
start->nb_segs++;
@@ -737,7 +722,15 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->last_seg = rxm;
if (rcd->eop) {
- rx_pkts[nb_rx++] = rxq->start_seg;
+ struct rte_mbuf *start = rxq->start_seg;
+
+ /* Check for hardware stripped VLAN tag */
+ if (rcd->ts) {
+ start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+ }
+
+ rx_pkts[nb_rx++] = start;
rxq->start_seg = NULL;
}
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c
index b9638d96..3e45808f 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.c
+++ b/drivers/net/xenvirt/rte_eth_xenvirt.c
@@ -39,6 +39,9 @@
#include <sys/mman.h>
#include <errno.h>
#include <sys/user.h>
+#ifndef PAGE_SIZE
+#define PAGE_SIZE sysconf(_SC_PAGE_SIZE)
+#endif
#include <linux/binfmts.h>
#include <xen/xen-compat.h>
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
@@ -79,18 +82,6 @@ static struct rte_eth_link pmd_link = {
static void
eth_xenvirt_free_queues(struct rte_eth_dev *dev);
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
-
- return m;
-}
-
-
static uint16_t
eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
@@ -122,7 +113,7 @@ eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
/* allocate new mbuf for the used descriptor */
while (likely(!virtqueue_full(rxvq))) {
- new_mbuf = rte_rxmbuf_alloc(rxvq->mpool);
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
break;
}
@@ -293,7 +284,7 @@ eth_dev_start(struct rte_eth_dev *dev)
dev->data->dev_link.link_status = ETH_LINK_UP;
while (!virtqueue_full(rxvq)) {
- m = rte_rxmbuf_alloc(rxvq->mpool);
+ m = rte_mbuf_raw_alloc(rxvq->mpool);
if (m == NULL)
break;
/* Enqueue allocated buffers. */
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.h b/drivers/net/xenvirt/rte_eth_xenvirt.h
index fc15a636..4995a9b4 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.h
+++ b/drivers/net/xenvirt/rte_eth_xenvirt.h
@@ -51,7 +51,7 @@ struct rte_mempool *
rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags);
diff --git a/drivers/net/xenvirt/rte_mempool_gntalloc.c b/drivers/net/xenvirt/rte_mempool_gntalloc.c
index 7bfbfda3..73e82f80 100644
--- a/drivers/net/xenvirt/rte_mempool_gntalloc.c
+++ b/drivers/net/xenvirt/rte_mempool_gntalloc.c
@@ -78,7 +78,7 @@ static struct _mempool_gntalloc_info
_create_mempool(const char *name, unsigned elt_num, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags)
{
struct _mempool_gntalloc_info mgi;
@@ -202,7 +202,7 @@ _create_mempool(const char *name, unsigned elt_num, unsigned elt_size,
obj_init, obj_init_arg,
socket_id, flags, va, pa_arr, rpg_num, pg_shift);
- RTE_VERIFY(elt_num == mp->size);
+ RTE_ASSERT(elt_num == mp->size);
}
mgi.mp = mp;
mgi.pg_num = rpg_num;
@@ -253,7 +253,7 @@ struct rte_mempool *
rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags)
{
int rv;
diff --git a/drivers/net/xenvirt/rte_xen_lib.c b/drivers/net/xenvirt/rte_xen_lib.c
index de63cd30..6c9a1d49 100644
--- a/drivers/net/xenvirt/rte_xen_lib.c
+++ b/drivers/net/xenvirt/rte_xen_lib.c
@@ -423,6 +423,7 @@ grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *g
{
char key_str[PATH_MAX] = {0};
char val_str[PATH_MAX] = {0};
+ void *mempool_obj_va;
if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) {
return -1;
@@ -437,7 +438,14 @@ grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *g
if (snprintf(key_str, sizeof(key_str),
DPDK_XENSTORE_PATH"%d"MEMPOOL_VA_XENSTORE_STR, mempool_idx) == -1)
return -1;
- if (snprintf(val_str, sizeof(val_str), "%"PRIxPTR, (uintptr_t)mpool->elt_va_start) == -1)
+ if (mpool->nb_mem_chunks != 1) {
+ RTE_LOG(ERR, PMD,
+ "mempool with more than 1 chunk is not supported\n");
+ return -1;
+ }
+ mempool_obj_va = STAILQ_FIRST(&mpool->mem_list)->addr;
+ if (snprintf(val_str, sizeof(val_str), "%"PRIxPTR,
+ (uintptr_t)mempool_obj_va) == -1)
return -1;
if (xenstore_write(key_str, val_str) == -1)
return -1;
diff --git a/examples/Makefile b/examples/Makefile
index b28b30e7..f650d3ec 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2014 6WIND S.A.
+# Copyright(c) 2016 6WIND S.A.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -64,6 +64,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += l2fwd-crypto
DIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += l2fwd-ivshmem
DIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += l2fwd-jobstats
DIRS-y += l2fwd-keepalive
+DIRS-y += l2fwd-keepalive/ka-agent
DIRS-$(CONFIG_RTE_LIBRTE_LPM) += l3fwd
DIRS-$(CONFIG_RTE_LIBRTE_ACL) += l3fwd-acl
ifeq ($(CONFIG_RTE_LIBRTE_LPM),y)
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index c0201a9e..24857f2d 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -52,19 +52,6 @@
#define BURST_SIZE 32
#define RTE_RING_SZ 1024
-/* uncommnet below line to enable debug logs */
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) do { \
- RTE_LOG(DEBUG, log_type, fmt, ##args); \
-} while (0)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
#define RTE_LOGTYPE_DISTRAPP RTE_LOGTYPE_USER1
/* mask of enabled ports */
@@ -178,19 +165,25 @@ struct lcore_params {
struct rte_mempool *mem_pool;
};
-static void
+static int
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
const unsigned num_workers = rte_lcore_count() - 2;
unsigned i;
struct rte_mbuf *bufs[num_workers];
- rte_mempool_get_bulk(p, (void *)bufs, num_workers);
+
+ if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
+ printf("line %d: Error getting mbufs from pool\n", __LINE__);
+ return -1;
+ }
for (i = 0; i < num_workers; i++)
bufs[i]->hash.rss = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);
+
+ return 0;
}
static int
@@ -240,7 +233,8 @@ lcore_rx(struct lcore_params *p)
uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
- LOG_DEBUG(DISTRAPP, "%s:Packet loss due to full ring\n", __func__);
+ RTE_LOG(DEBUG, DISTRAPP,
+ "%s:Packet loss due to full ring\n", __func__);
while (sent < nb_ret)
rte_pktmbuf_free(bufs[sent++]);
}
@@ -258,7 +252,8 @@ lcore_rx(struct lcore_params *p)
* get packets till quit_signal has actually been
* received and they gracefully shut down
*/
- quit_workers(d, mem_pool);
+ if (quit_workers(d, mem_pool) != 0)
+ return -1;
/* rx thread should quit at last */
return 0;
}
@@ -271,7 +266,8 @@ flush_one_port(struct output_buffer *outbuf, uint8_t outp)
app_stats.tx.tx_pkts += nb_tx;
if (unlikely(nb_tx < outbuf->count)) {
- LOG_DEBUG(DISTRAPP, "%s:Packet loss with tx_burst\n", __func__);
+ RTE_LOG(DEBUG, DISTRAPP,
+ "%s:Packet loss with tx_burst\n", __func__);
do {
rte_pktmbuf_free(outbuf->mbufs[nb_tx]);
} while (++nb_tx < outbuf->count);
@@ -588,7 +584,9 @@ main(int argc, char *argv[])
}
/* call lcore_main on master core only */
struct lcore_params p = { 0, d, output_ring, mbuf_pool};
- lcore_rx(&p);
+
+ if (lcore_rx(&p) != 0)
+ return -1;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c
index dc68989a..3c6112d7 100644
--- a/examples/dpdk_qat/main.c
+++ b/examples/dpdk_qat/main.c
@@ -661,8 +661,6 @@ main(int argc, char **argv)
return -1;
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_panic("check_port_config failed\n");
diff --git a/examples/ethtool/ethtool-app/ethapp.c b/examples/ethtool/ethtool-app/ethapp.c
index 2ed4796d..38e466c0 100644
--- a/examples/ethtool/ethtool-app/ethapp.c
+++ b/examples/ethtool/ethtool-app/ethapp.c
@@ -535,7 +535,6 @@ static void pcmd_portstats_callback(__rte_unused void *ptr_params,
}
stat = rte_ethtool_net_get_stats64(params->port, &stat_info);
if (stat == 0) {
- /* Most of rte_eth_stats is deprecated.. */
printf("Port %i stats\n", params->port);
printf(" In: %" PRIu64 " (%" PRIu64 " bytes)\n"
" Out: %"PRIu64" (%"PRIu64 " bytes)\n"
diff --git a/examples/ethtool/lib/Makefile b/examples/ethtool/lib/Makefile
index d7ee9555..5b4991e2 100644
--- a/examples/ethtool/lib/Makefile
+++ b/examples/ethtool/lib/Makefile
@@ -54,4 +54,8 @@ SRCS-y := rte_ethtool.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+# internal dependencies
+DEPDIRS-y += lib/librte_eal
+DEPDIRS-y += lib/librte_ether
+
include $(RTE_SDK)/mk/rte.extlib.mk
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 42e05f1f..54391f21 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -51,8 +51,7 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
if (drvinfo == NULL)
return -EINVAL;
- if (!rte_eth_dev_is_valid_port(port_id))
- return -ENODEV;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
@@ -120,8 +119,7 @@ rte_ethtool_get_link(uint8_t port_id)
{
struct rte_eth_link link;
- if (!rte_eth_dev_is_valid_port(port_id))
- return -ENODEV;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_link_get(port_id, &link);
return link.link_status;
}
@@ -267,8 +265,7 @@ rte_ethtool_net_open(uint8_t port_id)
int
rte_ethtool_net_stop(uint8_t port_id)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return -ENODEV;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_dev_stop(port_id);
return 0;
@@ -277,8 +274,7 @@ rte_ethtool_net_stop(uint8_t port_id)
int
rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return -ENODEV;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (addr == NULL)
return -EINVAL;
rte_eth_macaddr_get(port_id, addr);
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index bec98040..e5eedcc1 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -350,8 +350,7 @@ setup_port_lcore_affinities(void)
}
port_ids[i] = rx_port++;
- }
- else if (output_cores_mask & (1ULL << i)) {
+ } else if (output_cores_mask & (1ULL << (i & 0x3f))) {
/* Skip ports that are not enabled */
while ((ports_mask & (1 << tx_port)) == 0) {
tx_port++;
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 81a49187..2f452648 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -785,7 +785,7 @@ init_mem(void)
RTE_LOG(INFO, IP_FRAG, "Creating LPM6 table on socket %i\n", socket);
snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);
- lpm6 = rte_lpm6_create("IP_FRAG_LPM6", socket, &lpm6_config);
+ lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
if (lpm6 == NULL) {
RTE_LOG(ERR, IP_FRAG, "Cannot create LPM table\n");
return -1;
@@ -824,9 +824,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid arguments");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
- else if (nb_ports == 0)
+ if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No ports found!\n");
nb_lcores = rte_lcore_count();
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index 10fe1ba9..58271173 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -50,6 +50,7 @@ INC += $(wildcard *.h) $(wildcard pipeline/*.h)
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += parser.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse_tm.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_check.c
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += init.c
diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
index 55a98417..6a6fdd97 100644
--- a/examples/ip_pipeline/app.h
+++ b/examples/ip_pipeline/app.h
@@ -44,12 +44,24 @@
#include <cmdline_parse.h>
#include <rte_ethdev.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_kni.h>
+#endif
#include "cpu_core_map.h"
#include "pipeline.h"
#define APP_PARAM_NAME_SIZE PIPELINE_NAME_SIZE
#define APP_LINK_PCI_BDF_SIZE 16
+
+#ifndef APP_LINK_MAX_HWQ_IN
+#define APP_LINK_MAX_HWQ_IN 128
+#endif
+
+#ifndef APP_LINK_MAX_HWQ_OUT
+#define APP_LINK_MAX_HWQ_OUT 128
+#endif
+
struct app_mempool_params {
char *name;
uint32_t parsed;
@@ -69,6 +81,12 @@ struct app_link_params {
uint32_t tcp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
uint32_t udp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
uint32_t sctp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t rss_qs[APP_LINK_MAX_HWQ_IN];
+ uint32_t n_rss_qs;
+ uint64_t rss_proto_ipv4;
+ uint64_t rss_proto_ipv6;
+ uint64_t rss_proto_l2;
+ uint32_t promisc;
uint32_t state; /* DOWN = 0, UP = 1 */
uint32_t ip; /* 0 = Invalid */
uint32_t depth; /* Valid only when IP is valid */
@@ -76,7 +94,6 @@ struct app_link_params {
char pci_bdf[APP_LINK_PCI_BDF_SIZE];
struct rte_eth_conf conf;
- uint8_t promisc;
};
struct app_pktq_hwq_in_params {
@@ -118,6 +135,22 @@ struct app_pktq_swq_params {
uint32_t mempool_indirect_id;
};
+struct app_pktq_kni_params {
+ char *name;
+ uint32_t parsed;
+
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t hyper_th_id;
+ uint32_t force_bind;
+
+ uint32_t mempool_id; /* Position in the app->mempool_params */
+ uint32_t burst_read;
+ uint32_t burst_write;
+ uint32_t dropless;
+ uint64_t n_retries;
+};
+
#ifndef APP_FILE_NAME_SIZE
#define APP_FILE_NAME_SIZE 256
#endif
@@ -171,6 +204,7 @@ enum app_pktq_in_type {
APP_PKTQ_IN_HWQ,
APP_PKTQ_IN_SWQ,
APP_PKTQ_IN_TM,
+ APP_PKTQ_IN_KNI,
APP_PKTQ_IN_SOURCE,
};
@@ -183,6 +217,7 @@ enum app_pktq_out_type {
APP_PKTQ_OUT_HWQ,
APP_PKTQ_OUT_SWQ,
APP_PKTQ_OUT_TM,
+ APP_PKTQ_OUT_KNI,
APP_PKTQ_OUT_SINK,
};
@@ -191,9 +226,7 @@ struct app_pktq_out_params {
uint32_t id; /* Position in the appropriate app array */
};
-#ifndef APP_PIPELINE_TYPE_SIZE
-#define APP_PIPELINE_TYPE_SIZE 64
-#endif
+#define APP_PIPELINE_TYPE_SIZE PIPELINE_TYPE_SIZE
#define APP_MAX_PIPELINE_PKTQ_IN PIPELINE_MAX_PORT_IN
#define APP_MAX_PIPELINE_PKTQ_OUT PIPELINE_MAX_PORT_OUT
@@ -229,6 +262,22 @@ struct app_pipeline_params {
uint32_t n_args;
};
+struct app_params;
+
+typedef void (*app_link_op)(struct app_params *app,
+ uint32_t link_id,
+ uint32_t up,
+ void *arg);
+
+#ifndef APP_MAX_PIPELINES
+#define APP_MAX_PIPELINES 64
+#endif
+
+struct app_link_data {
+ app_link_op f_link[APP_MAX_PIPELINES];
+ void *arg[APP_MAX_PIPELINES];
+};
+
struct app_pipeline_data {
void *be;
void *fe;
@@ -247,7 +296,7 @@ struct app_thread_pipeline_data {
};
#ifndef APP_MAX_THREAD_PIPELINES
-#define APP_MAX_THREAD_PIPELINES 16
+#define APP_MAX_THREAD_PIPELINES 64
#endif
#ifndef APP_THREAD_TIMER_PERIOD
@@ -272,7 +321,7 @@ struct app_thread_data {
uint64_t headroom_time;
uint64_t headroom_cycles;
double headroom_ratio;
-};
+} __rte_cache_aligned;
#ifndef APP_MAX_LINKS
#define APP_MAX_LINKS 16
@@ -370,6 +419,8 @@ struct app_eal_params {
/* Support running on Xen dom0 without hugetlbfs */
uint32_t xen_dom0_present;
int xen_dom0;
+
+ uint32_t parsed;
};
#ifndef APP_APPNAME_SIZE
@@ -380,17 +431,9 @@ struct app_eal_params {
#define APP_MAX_MEMPOOLS 8
#endif
-#ifndef APP_LINK_MAX_HWQ_IN
-#define APP_LINK_MAX_HWQ_IN 64
-#endif
+#define APP_MAX_HWQ_IN (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
-#ifndef APP_LINK_MAX_HWQ_OUT
-#define APP_LINK_MAX_HWQ_OUT 64
-#endif
-
-#define APP_MAX_HWQ_IN (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
-
-#define APP_MAX_HWQ_OUT (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
+#define APP_MAX_HWQ_OUT (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
#ifndef APP_MAX_PKTQ_SWQ
#define APP_MAX_PKTQ_SWQ 256
@@ -398,24 +441,22 @@ struct app_eal_params {
#define APP_MAX_PKTQ_TM APP_MAX_LINKS
+#define APP_MAX_PKTQ_KNI APP_MAX_LINKS
+
#ifndef APP_MAX_PKTQ_SOURCE
-#define APP_MAX_PKTQ_SOURCE 16
+#define APP_MAX_PKTQ_SOURCE 64
#endif
#ifndef APP_MAX_PKTQ_SINK
-#define APP_MAX_PKTQ_SINK 16
+#define APP_MAX_PKTQ_SINK 64
#endif
#ifndef APP_MAX_MSGQ
-#define APP_MAX_MSGQ 64
-#endif
-
-#ifndef APP_MAX_PIPELINES
-#define APP_MAX_PIPELINES 64
+#define APP_MAX_MSGQ 256
#endif
#ifndef APP_EAL_ARGC
-#define APP_EAL_ARGC 32
+#define APP_EAL_ARGC 64
#endif
#ifndef APP_MAX_PIPELINE_TYPES
@@ -453,6 +494,7 @@ struct app_params {
struct app_pktq_hwq_out_params hwq_out_params[APP_MAX_HWQ_OUT];
struct app_pktq_swq_params swq_params[APP_MAX_PKTQ_SWQ];
struct app_pktq_tm_params tm_params[APP_MAX_PKTQ_TM];
+ struct app_pktq_kni_params kni_params[APP_MAX_PKTQ_KNI];
struct app_pktq_source_params source_params[APP_MAX_PKTQ_SOURCE];
struct app_pktq_sink_params sink_params[APP_MAX_PKTQ_SINK];
struct app_msgq_params msgq_params[APP_MAX_MSGQ];
@@ -464,6 +506,7 @@ struct app_params {
uint32_t n_pktq_hwq_out;
uint32_t n_pktq_swq;
uint32_t n_pktq_tm;
+ uint32_t n_pktq_kni;
uint32_t n_pktq_source;
uint32_t n_pktq_sink;
uint32_t n_msgq;
@@ -474,8 +517,12 @@ struct app_params {
struct cpu_core_map *core_map;
uint64_t core_mask;
struct rte_mempool *mempool[APP_MAX_MEMPOOLS];
+ struct app_link_data link_data[APP_MAX_LINKS];
struct rte_ring *swq[APP_MAX_PKTQ_SWQ];
struct rte_sched_port *tm[APP_MAX_PKTQ_TM];
+#ifdef RTE_LIBRTE_KNI
+ struct rte_kni *kni[APP_MAX_PKTQ_KNI];
+#endif /* RTE_LIBRTE_KNI */
struct rte_ring *msgq[APP_MAX_MSGQ];
struct pipeline_type pipeline_type[APP_MAX_PIPELINE_TYPES];
struct app_pipeline_data pipeline_data[APP_MAX_PIPELINES];
@@ -529,28 +576,6 @@ do \
sscanf(obj->name, prefix "%" SCNu32, &id); \
while (0) \
-#define APP_PARAM_ADD(obj_array, obj_name) \
-({ \
- ssize_t obj_idx; \
- const ssize_t obj_count = RTE_DIM(obj_array); \
- \
- obj_idx = APP_PARAM_FIND(obj_array, obj_name); \
- if (obj_idx < 0) { \
- for (obj_idx = 0; obj_idx < obj_count; obj_idx++) { \
- if (!APP_PARAM_VALID(&((obj_array)[obj_idx]))) \
- break; \
- } \
- \
- if (obj_idx < obj_count) { \
- (obj_array)[obj_idx].name = strdup(obj_name); \
- if ((obj_array)[obj_idx].name == NULL) \
- obj_idx = -EINVAL; \
- } else \
- obj_idx = -ENOMEM; \
- } \
- obj_idx; \
-})
-
#define APP_CHECK(exp, fmt, ...) \
do { \
if (!(exp)) { \
@@ -665,6 +690,41 @@ app_swq_get_readers(struct app_params *app, struct app_pktq_swq_params *swq)
return n_readers;
}
+static inline struct app_pipeline_params *
+app_swq_get_reader(struct app_params *app,
+ struct app_pktq_swq_params *swq,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SWQ) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
static inline uint32_t
app_tm_get_readers(struct app_params *app, struct app_pktq_tm_params *tm)
{
@@ -690,6 +750,101 @@ app_tm_get_readers(struct app_params *app, struct app_pktq_tm_params *tm)
return n_readers;
}
+static inline struct app_pipeline_params *
+app_tm_get_reader(struct app_params *app,
+ struct app_pktq_tm_params *tm,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_TM) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
+static inline uint32_t
+app_kni_get_readers(struct app_params *app, struct app_pktq_kni_params *kni)
+{
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_KNI) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline struct app_pipeline_params *
+app_kni_get_reader(struct app_params *app,
+ struct app_pktq_kni_params *kni,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_KNI) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
static inline uint32_t
app_source_get_readers(struct app_params *app,
struct app_pktq_source_params *source)
@@ -789,6 +944,42 @@ app_swq_get_writers(struct app_params *app, struct app_pktq_swq_params *swq)
return n_writers;
}
+static inline struct app_pipeline_params *
+app_swq_get_writer(struct app_params *app,
+ struct app_pktq_swq_params *swq,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SWQ) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
static inline uint32_t
app_tm_get_writers(struct app_params *app, struct app_pktq_tm_params *tm)
{
@@ -815,6 +1006,104 @@ app_tm_get_writers(struct app_params *app, struct app_pktq_tm_params *tm)
return n_writers;
}
+static inline struct app_pipeline_params *
+app_tm_get_writer(struct app_params *app,
+ struct app_pktq_tm_params *tm,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_TM) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
+static inline uint32_t
+app_kni_get_writers(struct app_params *app, struct app_pktq_kni_params *kni)
+{
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_KNI) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline struct app_pipeline_params *
+app_kni_get_writer(struct app_params *app,
+ struct app_pktq_kni_params *kni,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_KNI) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
static inline uint32_t
app_sink_get_writers(struct app_params *app, struct app_pktq_sink_params *sink)
{
@@ -913,6 +1202,26 @@ app_get_link_for_tm(struct app_params *app, struct app_pktq_tm_params *p_tm)
return &app->link_params[link_param_idx];
}
+static inline struct app_link_params *
+app_get_link_for_kni(struct app_params *app, struct app_pktq_kni_params *p_kni)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ uint32_t link_id;
+ ssize_t link_param_idx;
+
+ sscanf(p_kni->name, "KNI%" PRIu32, &link_id);
+ sprintf(link_name, "LINK%" PRIu32, link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p_kni->name);
+
+ return &app->link_params[link_param_idx];
+}
+
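The lookup above is purely name-convention driven: the numeric suffix parsed out of the KNI name selects the link, so "KNI2" resolves to "LINK2", and a KNI section without a matching LINK section fails the APP_CHECK at configuration time.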
+void app_pipeline_params_get(struct app_params *app,
+ struct app_pipeline_params *p_in,
+ struct pipeline_params *p_out);
+
int app_config_init(struct app_params *app);
int app_config_args(struct app_params *app,
@@ -932,6 +1241,8 @@ int app_config_check(struct app_params *app);
int app_init(struct app_params *app);
+int app_post_init(struct app_params *app);
+
int app_thread(void *arg);
int app_pipeline_type_register(struct app_params *app,
diff --git a/examples/ip_pipeline/config/action.cfg b/examples/ip_pipeline/config/action.cfg
new file mode 100644
index 00000000..994ae94a
--- /dev/null
+++ b/examples/ip_pipeline/config/action.cfg
@@ -0,0 +1,68 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; ________________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Flow |
+; RXQ2.0 --->| Actions |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |________________|
+;
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FLOW_ACTIONS
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+n_flows = 65536
+n_meters_per_flow = 4
+flow_id_offset = 286; ipdaddr
+ip_hdr_offset = 270
+color_offset = 128
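The offsets in [PIPELINE1] follow directly from the packet buffer layout in the header comment: the IPv4 header begins at 128 (mbuf) + 128 (headroom) + 14 (Ethernet) = 270, and the flow ID is taken from the IPv4 destination address, which sits 16 bytes into that header (270 + 16 = 286, hence the terse "ipdaddr" note). A sketch of the same arithmetic as C constants; the macro names are illustrative, not taken from the application:

    /* Illustrative constants mirroring the .cfg arithmetic. */
    #define MBUF_SIZE      128
    #define HEADROOM_SIZE  128
    #define ETH_HDR_SIZE    14
    #define IPV4_DADDR_OFS  16  /* daddr offset inside the IPv4 header */

    #define IP_HDR_OFFSET  (MBUF_SIZE + HEADROOM_SIZE + ETH_HDR_SIZE)  /* 270 */
    #define FLOW_ID_OFFSET (IP_HDR_OFFSET + IPV4_DADDR_OFS)            /* 286 */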
diff --git a/examples/ip_pipeline/config/action.sh b/examples/ip_pipeline/config/action.sh
new file mode 100644
index 00000000..2986ae60
--- /dev/null
+++ b/examples/ip_pipeline/config/action.sh
@@ -0,0 +1,119 @@
+#
+# run ./config/action.sh
+#
+
+p 1 action flow 0 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 0 g G y Y r R
+p 1 action flow 0 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 1 g G y Y r R
+p 1 action flow 0 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 2 g G y Y r R
+p 1 action flow 0 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 3 g G y Y r R
+p 1 action flow 0 port 0
+
+p 1 action flow 1 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 0 g G y Y r R
+p 1 action flow 1 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 1 g G y Y r R
+p 1 action flow 1 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 2 g G y Y r R
+p 1 action flow 1 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 3 g G y Y r R
+p 1 action flow 1 port 1
+
+p 1 action flow 2 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 0 g G y Y r R
+p 1 action flow 2 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 1 g G y Y r R
+p 1 action flow 2 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 2 g G y Y r R
+p 1 action flow 2 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 3 g G y Y r R
+p 1 action flow 2 port 2
+
+p 1 action flow 3 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 0 g G y Y r R
+p 1 action flow 3 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 1 g G y Y r R
+p 1 action flow 3 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 2 g G y Y r R
+p 1 action flow 3 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 3 g G y Y r R
+p 1 action flow 3 port 3
+
+#p 1 action flow bulk ./config/action.txt
+
+#p 1 action flow ls
+
+p 1 action flow 0 stats
+p 1 action flow 1 stats
+p 1 action flow 2 stats
+p 1 action flow 3 stats
+
+p 1 action dscp 0 class 0 color G
+p 1 action dscp 1 class 1 color G
+p 1 action dscp 2 class 2 color G
+p 1 action dscp 3 class 3 color G
+p 1 action dscp 4 class 0 color G
+p 1 action dscp 5 class 1 color G
+p 1 action dscp 6 class 2 color G
+p 1 action dscp 7 class 3 color G
+p 1 action dscp 8 class 0 color G
+p 1 action dscp 9 class 1 color G
+p 1 action dscp 10 class 2 color G
+p 1 action dscp 11 class 3 color G
+p 1 action dscp 12 class 0 color G
+p 1 action dscp 13 class 1 color G
+p 1 action dscp 14 class 2 color G
+p 1 action dscp 15 class 3 color G
+p 1 action dscp 16 class 0 color G
+p 1 action dscp 17 class 1 color G
+p 1 action dscp 18 class 2 color G
+p 1 action dscp 19 class 3 color G
+p 1 action dscp 20 class 0 color G
+p 1 action dscp 21 class 1 color G
+p 1 action dscp 22 class 2 color G
+p 1 action dscp 23 class 3 color G
+p 1 action dscp 24 class 0 color G
+p 1 action dscp 25 class 1 color G
+p 1 action dscp 26 class 2 color G
+p 1 action dscp 27 class 3 color G
+p 1 action dscp 28 class 0 color G
+p 1 action dscp 29 class 1 color G
+p 1 action dscp 30 class 2 color G
+p 1 action dscp 31 class 3 color G
+p 1 action dscp 32 class 0 color G
+p 1 action dscp 33 class 1 color G
+p 1 action dscp 34 class 2 color G
+p 1 action dscp 35 class 3 color G
+p 1 action dscp 36 class 0 color G
+p 1 action dscp 37 class 1 color G
+p 1 action dscp 38 class 2 color G
+p 1 action dscp 39 class 3 color G
+p 1 action dscp 40 class 0 color G
+p 1 action dscp 41 class 1 color G
+p 1 action dscp 42 class 2 color G
+p 1 action dscp 43 class 3 color G
+p 1 action dscp 44 class 0 color G
+p 1 action dscp 45 class 1 color G
+p 1 action dscp 46 class 2 color G
+p 1 action dscp 47 class 3 color G
+p 1 action dscp 48 class 0 color G
+p 1 action dscp 49 class 1 color G
+p 1 action dscp 50 class 2 color G
+p 1 action dscp 51 class 3 color G
+p 1 action dscp 52 class 0 color G
+p 1 action dscp 53 class 1 color G
+p 1 action dscp 54 class 2 color G
+p 1 action dscp 55 class 3 color G
+p 1 action dscp 56 class 0 color G
+p 1 action dscp 57 class 1 color G
+p 1 action dscp 58 class 2 color G
+p 1 action dscp 59 class 3 color G
+p 1 action dscp 60 class 0 color G
+p 1 action dscp 61 class 1 color G
+p 1 action dscp 62 class 2 color G
+p 1 action dscp 63 class 3 color G
+
+p 1 action dscp ls
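Each "meter ... trtcm" line programs a two-rate three-color marker; the four numeric arguments are, in order, the committed and peak information rates in bytes per second followed by the committed and peak burst sizes in bytes, so 1250000000 bytes/s corresponds to a 10 Gbps line rate. A sketch of the equivalent librte_meter setup, assuming the rte_meter trTCM API of this DPDK version; the variable names are illustrative:

    #include <rte_meter.h>

    /* Sketch: the CLI arguments map onto rte_meter trTCM parameters. */
    struct rte_meter_trtcm_params params = {
            .cir = 1250000000, /* committed rate: 1.25 GB/s = 10 Gbps */
            .pir = 1250000000, /* peak rate */
            .cbs = 1000000,    /* committed burst size (bytes) */
            .pbs = 1000000,    /* peak burst size (bytes) */
    };
    struct rte_meter_trtcm m;
    rte_meter_trtcm_config(&m, &params);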
diff --git a/examples/ip_pipeline/config/action.txt b/examples/ip_pipeline/config/action.txt
new file mode 100644
index 00000000..f14207b9
--- /dev/null
+++ b/examples/ip_pipeline/config/action.txt
@@ -0,0 +1,8 @@
+#
+# p <pipelineid> action flow bulk ./config/action.txt
+#
+
+flow 0 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 0
+flow 1 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 1
+flow 2 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 2
+flow 3 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 3
diff --git a/examples/ip_pipeline/config/edge_router_downstream.cfg b/examples/ip_pipeline/config/edge_router_downstream.cfg
index 85bbab8f..c6b4e1f2 100644
--- a/examples/ip_pipeline/config/edge_router_downstream.cfg
+++ b/examples/ip_pipeline/config/edge_router_downstream.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -36,9 +36,9 @@
; network) contains the following functional blocks: Packet RX & Routing,
; Traffic management and Packet TX. The input packets are assumed to be
; IPv4, while the output packets are Q-in-Q IPv4.
-
+;
; A simple implementation for this functional pipeline is presented below.
-
+;
; Packet Rx & Traffic Management Packet Tx
; Routing (Pass-Through) (Pass-Through)
; _____________________ SWQ0 ______________________ SWQ4 _____________________
@@ -50,11 +50,23 @@
; | | SWQ3 | | SWQ7 | |
; RXQ3.0 --->| |----->| |----->| |---> TXQ3.0
; |_____________________| |______________________| |_____________________|
-; | _|_ ^ _|_ ^ _|_ ^ _|_ ^
-; | |___|||___|||___|||___||
-; +--> SINK0 |___|||___|||___|||___||
-; (route miss) |__| |__| |__| |__|
-; TM0 TM1 TM2 TM3
+; | | ^ | ^ | ^ | ^
+; | |__| |__| |__| |__|
+; +--> SINK0 TM0 TM1 TM2 TM3
+; (Default)
+;
+; Input packet: Ethernet/IPv4
+; Output packet: Ethernet/QinQ/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
[PIPELINE0]
type = MASTER
@@ -67,7 +79,7 @@ pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
pktq_out = SWQ0 SWQ1 SWQ2 SWQ3 SINK0
encap = ethernet_qinq
qinq_sched = test
-ip_hdr_offset = 270; mbuf (128) + headroom (128) + ethernet header (14) = 270
+ip_hdr_offset = 270
[PIPELINE2]
type = PASS-THROUGH
diff --git a/examples/ip_pipeline/config/edge_router_downstream.sh b/examples/ip_pipeline/config/edge_router_downstream.sh
index ce46beb5..67c3a0d1 100644
--- a/examples/ip_pipeline/config/edge_router_downstream.sh
+++ b/examples/ip_pipeline/config/edge_router_downstream.sh
@@ -1,3 +1,7 @@
+#
+# run ./config/edge_router_downstream.sh
+#
+
################################################################################
# Routing: Ether QinQ, ARP off
################################################################################
@@ -6,5 +10,4 @@ p 1 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 256 257
p 1 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 258 259
p 1 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 260 261
p 1 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 262 263
-
-p 1 route ls
+#p 1 route ls
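The four /10 routes tile the 0.0.0.0/8 test range evenly (0.0.0.0/10, 0.64.0.0/10, 0.128.0.0/10, 0.192.0.0/10), one QinQ tag pair and output port per quarter; anything outside that range falls through to the pipeline's default route (port 4 = SINK0 in the config).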
diff --git a/examples/ip_pipeline/config/edge_router_upstream.cfg b/examples/ip_pipeline/config/edge_router_upstream.cfg
index a08c5cce..dea42b95 100644
--- a/examples/ip_pipeline/config/edge_router_upstream.cfg
+++ b/examples/ip_pipeline/config/edge_router_upstream.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -29,6 +29,7 @@
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
; An edge router typically sits between two networks such as the provider
; core network and the provider access network. A typical packet processing
; pipeline for the upstream traffic (i.e. traffic from access to core
@@ -36,10 +37,10 @@
; Flow classification, Metering, Routing and Packet TX. The input packets
; are assumed to be Q-in-Q IPv4, while the output packets are MPLS IPv4
; (with variable number of labels per route).
-
+;
; A simple implementation for this functional pipeline is presented below.
-
-; Packet Rx & Pass-Through Flow-Classification Flow-Actions Routing
+;
+; Packet RX & Pass-Through Flow Classification Flow Actions Routing
; Firewall
; __________ SWQ0 __________ SWQ4 __________ SWQ8 __________ SWQ12 __________
; RXQ0.0 --->| |------>| |------>| |------>| |------>| |------> TXQ0.0
@@ -51,8 +52,21 @@
; RXQ3.0 --->| |------>| |------>| |------>| |------>| |------> TXQ3.0
; |__________| |__________| |__________| |__________| |__________|
; | | |
-; +--> SINK0 (Default) +--> SINK1 (Default) +--> SINK2 (Route Miss)
+; +--> SINK0 (Default) +--> SINK1 (Default) +--> SINK2 (Default)
+;
+; Input packet: Ethernet/QinQ/IPv4
+; Output packet: Ethernet/MPLS/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 QinQ header 270 8
+; 4 IPv4 header 278 20
+[EAL]
+log_level = 0
[PIPELINE0]
type = MASTER
@@ -72,10 +86,10 @@ core = 2
pktq_in = SWQ0 SWQ1 SWQ2 SWQ3
pktq_out = SWQ4 SWQ5 SWQ6 SWQ7
dma_size = 8
-dma_dst_offset = 128; mbuf (128)
-dma_src_offset = 268; mbuf (128) + headroom (128) + 1st ethertype offset (12) = 268
+dma_dst_offset = 128
+dma_src_offset = 268; 1st Ethertype offset
dma_src_mask = 00000FFF00000FFF; qinq
-dma_hash_offset = 136; dma_dst_offset + dma_size = 136
+dma_hash_offset = 136; dma_dst_offset + dma_size
[PIPELINE3]
type = FLOW_CLASSIFICATION
@@ -86,7 +100,7 @@ n_flows = 65536
key_size = 8; dma_size
key_offset = 128; dma_dst_offset
hash_offset = 136; dma_hash_offset
-flowid_offset = 192; mbuf (128) + 64
+flowid_offset = 192
[PIPELINE4]
type = FLOW_ACTIONS
@@ -96,7 +110,7 @@ pktq_out = SWQ12 SWQ13 SWQ14 SWQ15
n_flows = 65536
n_meters_per_flow = 1
flow_id_offset = 192; flowid_offset
-ip_hdr_offset = 278; mbuf (128) + headroom (128) + ethernet (14) + qinq (8) = 278
+ip_hdr_offset = 278
color_offset = 196; flowid_offset + sizeof(flow_id)
[PIPELINE5]
@@ -106,5 +120,5 @@ pktq_in = SWQ12 SWQ13 SWQ14 SWQ15
pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2
encap = ethernet_mpls
mpls_color_mark = yes
-ip_hdr_offset = 278; mbuf (128) + headroom (128) + ethernet (14) + qinq (8) = 278
+ip_hdr_offset = 278
color_offset = 196; flowid_offset + sizeof(flow_id)
diff --git a/examples/ip_pipeline/config/edge_router_upstream.sh b/examples/ip_pipeline/config/edge_router_upstream.sh
index eeba600c..5d574c1a 100644
--- a/examples/ip_pipeline/config/edge_router_upstream.sh
+++ b/examples/ip_pipeline/config/edge_router_upstream.sh
@@ -1,24 +1,26 @@
-################################################
-# Firewall Rules:4 for 4 ports
-################################################
-p 1 firewall add ipv4 1 0.0.0.0 8 0.0.0.0 10 0 0 0 0 6 1 0
-p 1 firewall add ipv4 1 0.0.0.0 8 0.64.0.0 10 0 0 0 0 6 1 1
-p 1 firewall add ipv4 1 0.0.0.0 8 0.128.0.0 10 0 0 0 0 6 1 2
-p 1 firewall add ipv4 1 0.0.0.0 8 0.192.0.0 10 0 0 0 0 6 1 3
-p 1 firewall add default 4 #SINK0
+#
+# run ./config/edge_router_upstream.sh
+#
+################################################################################
+# Firewall
+################################################################################
+p 1 firewall add default 4 #SINK0
+p 1 firewall add bulk ./config/edge_router_upstream_firewall.txt
+#p 1 firewall ls
################################################################################
-# Flow classification
+# Flow Classification
################################################################################
p 3 flow add default 4 #SINK1
-p 3 flow add qinq all 65536 4
+p 3 flow add qinq bulk ./config/edge_router_upstream_flow.txt
+#p 3 flow ls
################################################################################
-# Flow Actions - Metering
+# Flow Actions - Metering and Policing
################################################################################
-p 4 flows 65536 meter 0 trtcm 1250000000 1250000000 100000000 100000000
-p 4 flows 65536 ports 4
+p 4 action flow bulk ./config/edge_router_upstream_action.txt
+#p 4 action flow ls
################################################################################
# Routing: Ether MPLS, ARP off
@@ -28,11 +30,4 @@ p 5 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 0:1
p 5 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 10:11
p 5 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 20:21
p 5 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 30:31
-
-################################################################################
-# List all configurations
-################################################################################
-p 1 firewall ls
-#p 3 flow ls
-#p 4 flow actions ls
-p 5 route ls
+#p 5 route ls
diff --git a/examples/ip_pipeline/config/firewall.cfg b/examples/ip_pipeline/config/firewall.cfg
new file mode 100644
index 00000000..2f5dd9f6
--- /dev/null
+++ b/examples/ip_pipeline/config/firewall.cfg
@@ -0,0 +1,68 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; _______________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Firewall |
+; RXQ2.0 --->| |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |_______________|
+; |
+; +-----------> SINK0 (default rule)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FIREWALL
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+n_rules = 4096
+pkt_type = ipv4
+;pkt_type = vlan_ipv4
+;pkt_type = qinq_ipv4
diff --git a/examples/ip_pipeline/config/firewall.sh b/examples/ip_pipeline/config/firewall.sh
new file mode 100644
index 00000000..c83857ee
--- /dev/null
+++ b/examples/ip_pipeline/config/firewall.sh
@@ -0,0 +1,13 @@
+#
+# run ./config/firewall.sh
+#
+
+p 1 firewall add default 4 #SINK0
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 0xF port 1
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 0xF port 2
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 0xF port 3
+
+#p 1 firewall add bulk ./config/firewall.txt
+
+p 1 firewall ls
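Each rule uses the same positional syntax; reading it field by field (a breakdown inferred from the commands themselves, assuming the usual pipeline firewall CLI meaning):

    p <pipeline> firewall add priority <prio> ipv4
            <src-ip> <src-depth> <dst-ip> <dst-depth>
            <sport-min> <sport-max> <dport-min> <dport-max>
            <proto> <proto-mask> port <out-port>

So the first rule above matches any source address, destinations in 100.0.0.0/10, any source or destination port, protocol 6 (TCP) under the 0xF protocol mask, and forwards matching packets to output port 0.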
diff --git a/examples/ip_pipeline/config/firewall.txt b/examples/ip_pipeline/config/firewall.txt
new file mode 100644
index 00000000..54cfffda
--- /dev/null
+++ b/examples/ip_pipeline/config/firewall.txt
@@ -0,0 +1,9 @@
+#
+# p <pipelineid> firewall add bulk ./config/firewall.txt
+# p <pipelineid> firewall del bulk ./config/firewall.txt
+#
+
+priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
+priority 1 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 0xF port 1
+priority 1 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 0xF port 2
+priority 1 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 0xF port 3
diff --git a/examples/ip_pipeline/config/flow.cfg b/examples/ip_pipeline/config/flow.cfg
new file mode 100644
index 00000000..6895d393
--- /dev/null
+++ b/examples/ip_pipeline/config/flow.cfg
@@ -0,0 +1,72 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; ________________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Flow |
+; RXQ2.0 --->| Classification |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |________________|
+; |
+; +-----------> SINK0 (flow lookup miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 QinQ/IPv4/IPv6 header 270 8/20/40
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FLOW_CLASSIFICATION
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+n_flows = 65536
+;key_size = 8 ; QinQ key size
+;key_offset = 270 ; QinQ key offset
+;key_mask = 0000FFF00000FFF0 ; QinQ key mask
+key_size = 16 ; IPv4 5-tuple key size
+key_offset = 278 ; IPv4 5-tuple key offset
+key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF ; IPv4 5-tuple key mask
+flowid_offset = 128
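The IPv4 5-tuple key is easiest to read as a byte map: key_offset = 278 places the 16-byte key window at the IPv4 TTL byte (270 + 8), and key_mask blanks the two fields inside that window that are not part of the 5-tuple (TTL and header checksum):

    key byte(s)  packet field       mask
    0            IPv4 TTL           00        (ignored)
    1            IPv4 protocol      FF
    2-3          IPv4 checksum      0000      (ignored)
    4-7          IPv4 source addr   FFFFFFFF
    8-11         IPv4 dest addr     FFFFFFFF
    12-13        L4 source port     FFFF
    14-15        L4 dest port       FFFF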
diff --git a/examples/ip_pipeline/config/flow.sh b/examples/ip_pipeline/config/flow.sh
new file mode 100644
index 00000000..489c7079
--- /dev/null
+++ b/examples/ip_pipeline/config/flow.sh
@@ -0,0 +1,25 @@
+#
+# run ./config/flow.sh
+#
+
+################################################################################
+# Flow classification (QinQ)
+################################################################################
+#p 1 flow add default 4 #SINK0
+#p 1 flow add qinq 100 200 port 0 id 0
+#p 1 flow add qinq 101 201 port 1 id 1
+#p 1 flow add qinq 102 202 port 2 id 2
+#p 1 flow add qinq 103 203 port 3 id 3
+
+#p 1 flow add qinq bulk ./config/flow.txt
+
+################################################################################
+# Flow classification (IPv4 5-tuple)
+################################################################################
+p 1 flow add default 4 #SINK0
+p 1 flow add ipv4 100.0.0.10 200.0.0.10 100 200 6 port 0 id 0
+p 1 flow add ipv4 100.0.0.11 200.0.0.11 101 201 6 port 1 id 1
+p 1 flow add ipv4 100.0.0.12 200.0.0.12 102 202 6 port 2 id 2
+p 1 flow add ipv4 100.0.0.13 200.0.0.13 103 203 6 port 3 id 3
+
+#p 1 flow add ipv4 bulk ./config/flow.txt
diff --git a/examples/ip_pipeline/config/flow.txt b/examples/ip_pipeline/config/flow.txt
new file mode 100644
index 00000000..c1a141dd
--- /dev/null
+++ b/examples/ip_pipeline/config/flow.txt
@@ -0,0 +1,17 @@
+#
+# p <pipelineid> flow add qinq bulk ./config/flow.txt
+#
+
+#qinq 100 200 port 0 id 0
+#qinq 101 201 port 1 id 1
+#qinq 102 202 port 2 id 2
+#qinq 103 203 port 3 id 3
+
+#
+# p <pipelineid> flow add ipv4 bulk ./config/flow.txt
+#
+
+ipv4 100.0.0.10 200.0.0.10 100 200 6 port 0 id 0
+ipv4 100.0.0.11 200.0.0.11 101 201 6 port 1 id 1
+ipv4 100.0.0.12 200.0.0.12 102 202 6 port 2 id 2
+ipv4 100.0.0.13 200.0.0.13 103 203 6 port 3 id 3
diff --git a/examples/ip_pipeline/config/kni.cfg b/examples/ip_pipeline/config/kni.cfg
new file mode 100644
index 00000000..cea208b4
--- /dev/null
+++ b/examples/ip_pipeline/config/kni.cfg
@@ -0,0 +1,67 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;
+; ______________ ______________________
+; | | KNI0 | |
+; RXQ0.0 --->| |------->|--+ |
+; | | KNI1 | | br0 |
+; TXQ1.0 <---| |<-------|<-+ |
+; | Pass-through | | Linux Kernel |
+; | (P1) | | Network Stack |
+; | | KNI1 | |
+; RXQ1.0 --->| |------->|--+ |
+; | | KNI0 | | br0 |
+; TXQ0.0 <---| |<-------|<-+ |
+; |______________| |______________________|
+;
+; Insert Linux kernel KNI module:
+; [Linux]$ insmod rte_kni.ko
+;
+; Configure Linux kernel bridge between KNI0 and KNI1 interfaces:
+; [Linux]$ ifconfig KNI0 up
+; [Linux]$ ifconfig KNI1 up
+; [Linux]$ brctl addbr "br0"
+; [Linux]$ brctl addif br0 KNI0
+; [Linux]$ brctl addif br0 KNI1
+; [Linux]$ ifconfig br0 up
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = RXQ0.0 KNI1 RXQ1.0 KNI0
+pktq_out = KNI0 TXQ1.0 KNI1 TXQ0.0
diff --git a/examples/ip_pipeline/config/l2fwd.cfg b/examples/ip_pipeline/config/l2fwd.cfg
index c743a143..a1df9e6a 100644
--- a/examples/ip_pipeline/config/l2fwd.cfg
+++ b/examples/ip_pipeline/config/l2fwd.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,9 @@
; |________________|
;
+[EAL]
+log_level = 0
+
[PIPELINE0]
type = MASTER
core = 0
diff --git a/examples/ip_pipeline/config/l3fwd.cfg b/examples/ip_pipeline/config/l3fwd.cfg
index 5449dc32..02c8f36f 100644
--- a/examples/ip_pipeline/config/l3fwd.cfg
+++ b/examples/ip_pipeline/config/l3fwd.cfg
@@ -1,6 +1,6 @@
; BSD LICENSE
;
-; Copyright(c) 2015 Intel Corporation. All rights reserved.
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
@@ -50,6 +50,9 @@
; 2 Ethernet header 256 14
; 3 IPv4 header 270 20
+[EAL]
+log_level = 0
+
[PIPELINE0]
type = MASTER
core = 0
@@ -59,5 +62,7 @@ type = ROUTING
core = 1
pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
-encap = ethernet; encap = ethernet / ethernet_qinq / ethernet_mpls
+encap = ethernet
+;encap = ethernet_qinq
+;encap = ethernet_mpls
ip_hdr_offset = 270
diff --git a/examples/ip_pipeline/config/l3fwd.sh b/examples/ip_pipeline/config/l3fwd.sh
index 27740103..47406aa4 100644
--- a/examples/ip_pipeline/config/l3fwd.sh
+++ b/examples/ip_pipeline/config/l3fwd.sh
@@ -1,9 +1,33 @@
+#
+# run ./config/l3fwd.sh
+#
+
################################################################################
# Routing: encap = ethernet, arp = off
################################################################################
p 1 route add default 4 #SINK0
-p 1 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0
-p 1 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1
-p 1 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2
-p 1 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3
+p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0
+p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1
+p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2
+p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3
p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_qinq, arp = off
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 1000 2000
+#p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 1001 2001
+#p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 1002 2002
+#p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 1003 2003
+#p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_mpls, arp = off
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 1000:2000
+#p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 1001:2001
+#p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 1002:2002
+#p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 1003:2003
+#p 1 route ls
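All three variants share the same base syntax and differ only in the trailing encapsulation arguments:

    p <pipeline> route add <ip> <depth> port <out-port> ether <dst-mac>
            [qinq <svlan> <cvlan> | mpls <label0>:<label1>]

With ARP disabled, as here, the ether argument is the literal destination MAC written into the output frame.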
diff --git a/examples/ip_pipeline/config/l3fwd_arp.cfg b/examples/ip_pipeline/config/l3fwd_arp.cfg
new file mode 100644
index 00000000..2c63c8fd
--- /dev/null
+++ b/examples/ip_pipeline/config/l3fwd_arp.cfg
@@ -0,0 +1,70 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; _______________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Routing |
+; RXQ2.0 --->| |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |_______________|
+; |
+; +-----------> SINK0 (route miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+encap = ethernet
+;encap = ethernet_qinq
+;encap = ethernet_mpls
+n_arp_entries = 1024
+ip_hdr_offset = 270
+arp_key_offset = 128
diff --git a/examples/ip_pipeline/config/l3fwd_arp.sh b/examples/ip_pipeline/config/l3fwd_arp.sh
new file mode 100644
index 00000000..20bea582
--- /dev/null
+++ b/examples/ip_pipeline/config/l3fwd_arp.sh
@@ -0,0 +1,43 @@
+#
+# run ./config/l3fwd_arp.sh
+#
+
+################################################################################
+# ARP
+################################################################################
+p 1 arp add default 4 #SINK0
+p 1 arp add 0 10.0.0.1 a0:b0:c0:d0:e0:f0
+p 1 arp add 1 11.0.0.1 a1:b1:c1:d1:e1:f1
+p 1 arp add 2 12.0.0.1 a2:b2:c2:d2:e2:f2
+p 1 arp add 3 13.0.0.1 a3:b3:c3:d3:e3:f3
+p 1 arp ls
+
+################################################################################
+# Routing: encap = ethernet, arp = on
+################################################################################
+p 1 route add default 4 #SINK0
+p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1
+p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1
+p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1
+p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1
+p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_qinq, arp = on
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1 qinq 1000 2000
+#p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1 qinq 1001 2001
+#p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1 qinq 1002 2002
+#p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1 qinq 1003 2003
+#p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_mpls, arp = on
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1 mpls 1000:2000
+#p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1 mpls 1001:2001
+#p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1 mpls 1002:2002
+#p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1 mpls 1003:2003
+#p 1 route ls
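With n_arp_entries set in the pipeline configuration (ARP on), the ether argument of route add changes meaning: it names the next-hop IP address rather than a MAC, and the static ARP table populated above supplies the MAC at forwarding time (for example, routes via port 0 resolve 10.0.0.1 to a0:b0:c0:d0:e0:f0).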
diff --git a/examples/ip_pipeline/config/network_layers.cfg b/examples/ip_pipeline/config/network_layers.cfg
new file mode 100644
index 00000000..8054d9fe
--- /dev/null
+++ b/examples/ip_pipeline/config/network_layers.cfg
@@ -0,0 +1,223 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; The diagram below shows how additional protocol components can be plugged into
+; the IP layer implemented by the ip_pipeline application. Pick your favorite
+; open source components for dynamic ARP, ICMP, UDP or TCP termination, etc. and
+; connect them through SWQs to the IP infrastructure.
+;
+; The input packets with local destination are sent to the UDP/TCP applications
+; while the input packets with remote destination are routed back to the
+; network. Additional features can easily be added to this setup:
+; * IP Reassembly: add SWQs with IP reassembly enabled (typically required for
+; the input traffic with local destination);
+; * IP Fragmentation: add SWQs with IP fragmentation enabled (typically
+; required to enforce the MTU for the routed output traffic);
+; * Traffic Metering: add Flow Action pipeline instances (e.g. for metering the
+; TCP connections or ICMP input traffic);
+; * Traffic Management: add TMs for the required output LINKs;
+; * Protocol encapsulations (QinQ, MPLS) for the output packets: part of the
+; routing pipeline configuration.
+;
+; _________ _________
+; | | | |
+; | UDP | | TCP |
+; | App | | App |
+; |_________| |_________|
+; ^ | ^ |
+; __|___V__ __|___V__
+; | | SWQ0 (UDP TX) | | SWQ1 (TCP TX)
+; | UDP |-------+ | TCP |------------+
+; | | | | | |
+; |_________| | |_________| |
+; ^ | ^ |
+; | SWQ2 | | SWQ3 |
+; | (UDP RX) | | (TCP RX) |
+; ____|____ | ____|____ |
+; | | | | | |
+; RXQ<0..3>.1 ------>|Firewall +--->| | +------>| Flow +--->| |
+; (UDP local dest) | (P2) | SINK0 | | | (P3) | SINK1 |
+; |_________| (Deny)| | |_________| (RST) |
+; RXQ<0..3>.2 -------------------------|-----+ |
+; (TCP local dest) | |
+; | +------------------------------+
+; | |
+; _V_____V_
+; | |
+; | Routing | TXQ<0..3>.0
+; RXQ<0..3>.0 ---------------------->| & ARP +----------------------------->
+; (IP remote dest) | (P1) |
+; |_________|
+; | ^ |
+; SWQ4 +-------------+ | | SWQ5 (ARP miss)
+; (Route miss) | | +------------+
+; | +-------------+ |
+; ___V__|__ SWQ6 ____V____
+; | | (ICMP TX) | | TXQ<0..3>.1
+; RXQ<0..3>.3 ------>| ICMP | +------>| Dyn ARP +------------->
+; (IP local dest) | | | | |
+; |_________| | |_________|
+; RXQ<0..3>.4 -------------------------------+
+; (ARP)
+;
+; This configuration file implements the diagram presented below, where the
+; dynamic ARP, ICMP, UDP and TCP components have been stubbed out and replaced
+; with loop-back and packet drop devices.
+;
+; _________ _________
+; | | SWQ0 (UDP TX) | | SWQ1 (TCP TX)
+; |Loopback |-------+ |Loopback |------------+
+; | (P4) | | | (P5) | |
+; |_________| | |_________| |
+; ^ | ^ |
+; | SWQ2 | | SWQ3 |
+; | (UDP RX) | | (TCP RX) |
+; ____|____ | ____|____ |
+; | | | | | |
+; RXQ<0..3>.1 ------>|Firewall +--->| | +------>| Flow +--->| |
+; (UDP local dest) | (P2) | SINK0 | | | (P3) | SINK1 |
+; |_________| (Deny)| | |_________| (RST) |
+; RXQ<0..3>.2 -------------------------|-----+ |
+; (TCP local dest) | |
+; | +------------------------------+
+; | |
+; _V_____V_
+; | |
+; | Routing | TXQ<0..3>.0
+; RXQ<0..3>.0 ---------------------->| & ARP +----------------------------->
+; (IP remote dest) | (P1) |
+; |_________|
+; | |
+; SINK2 |<---+ +--->| SINK3
+; (Route miss) (ARP miss)
+;
+; _________ _________
+; | | | |
+; RXQ<0..3>.3 ------>| Drop +--->| SINK<4..7> +------>| Drop +--->| SINK<8..11>
+; (IP local dest) | (P6) | (IP local dest) | | (P7) | (ARP)
+; |_________| | |_________|
+; RXQ<0..3>.4 ------------------------------------+
+; (ARP)
+;
+;
+; Input packet: Ethernet/IPv4 or Ethernet/ARP
+; Output packet: Ethernet/IPv4 or Ethernet/ARP
+;
+; Packet buffer layout (for input IPv4 packets):
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+; 4 ICMP/UDP/TCP header 290 8/8/20
+
+[EAL]
+log_level = 0
+
+[LINK0]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK1]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK2]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK3]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0 SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2 SINK3
+port_local_dest = 4 ; SINK2 (Drop)
+n_arp_entries = 1000
+ip_hdr_offset = 270
+arp_key_offset = 128
+
+[PIPELINE2]
+type = FIREWALL
+core = 1
+pktq_in = RXQ0.1 RXQ1.1 RXQ2.1 RXQ3.1
+pktq_out = SWQ2 SINK0
+n_rules = 4096
+
+[PIPELINE3]
+type = FLOW_CLASSIFICATION
+core = 1
+pktq_in = RXQ0.2 RXQ1.2 RXQ2.2 RXQ3.2
+pktq_out = SWQ3 SINK1
+n_flows = 65536
+key_size = 16 ; IPv4 5-tuple key size
+key_offset = 278 ; IPv4 5-tuple key offset
+key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF ; IPv4 5-tuple key mask
+flowid_offset = 128 ; Flow ID effectively acts as TCP socket ID
+
+[PIPELINE4]
+type = PASS-THROUGH ; Loop-back (UDP place-holder)
+core = 1
+pktq_in = SWQ2
+pktq_out = SWQ0
+
+[PIPELINE5]
+type = PASS-THROUGH ; Loop-back (TCP place-holder)
+core = 1
+pktq_in = SWQ3
+pktq_out = SWQ1
+
+[PIPELINE6]
+type = PASS-THROUGH ; Drop (ICMP place-holder)
+core = 1
+pktq_in = RXQ0.3 RXQ1.3 RXQ2.3 RXQ3.3
+pktq_out = SINK4 SINK5 SINK6 SINK7
+
+[PIPELINE7]
+type = PASS-THROUGH ; Drop (Dynamic ARP place-holder)
+core = 1
+pktq_in = RXQ0.4 RXQ1.4 RXQ2.4 RXQ3.4
+pktq_out = SINK8 SINK9 SINK10 SINK11
diff --git a/examples/ip_pipeline/config/network_layers.sh b/examples/ip_pipeline/config/network_layers.sh
new file mode 100644
index 00000000..3b86bebd
--- /dev/null
+++ b/examples/ip_pipeline/config/network_layers.sh
@@ -0,0 +1,79 @@
+#
+# run ./config/network_layers.sh
+#
+
+################################################################################
+# Link configuration
+################################################################################
+# Routes added implicitly when links are brought UP:
+# IP Prefix = 10.0.0.1/16 => (Port 0, Local)
+# IP Prefix = 10.0.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.1.0.1/16 => (Port 1, Local)
+# IP Prefix = 10.1.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.2.0.1/16 => (Port 2, Local)
+# IP Prefix = 10.2.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.3.0.1/16 => (Port 3, Local)
+# IP Prefix = 10.3.0.1/32 => (Port 4, Local)
+link 0 down
+link 1 down
+link 2 down
+link 3 down
+link 0 config 10.0.0.1 16
+link 1 config 10.1.0.1 16
+link 2 config 10.2.0.1 16
+link 3 config 10.3.0.1 16
+link 0 up
+link 1 up
+link 2 up
+link 3 up
+#link ls
+
+################################################################################
+# Static ARP
+################################################################################
+p 1 arp add default 5 #SINK3
+p 1 arp add 0 10.0.0.2 a0:b0:c0:d0:e0:f0
+p 1 arp add 1 10.1.0.2 a1:b1:c1:d1:e1:f1
+p 1 arp add 2 10.2.0.2 a2:b2:c2:d2:e2:f2
+p 1 arp add 3 10.3.0.2 a3:b3:c3:d3:e3:f3
+#p 1 arp ls
+
+################################################################################
+# Routes
+################################################################################
+p 1 route add default 4 #SINK2
+p 1 route add 100.0.0.0 16 port 0 ether 10.0.0.2
+p 1 route add 100.1.0.0 16 port 1 ether 10.1.0.2
+p 1 route add 100.2.0.0 16 port 2 ether 10.2.0.2
+p 1 route add 100.3.0.0 16 port 3 ether 10.3.0.2
+#p 1 route ls
+
+################################################################################
+# Local destination UDP traffic
+################################################################################
+# Prio = Lowest: [SA = ANY, DA = ANY, SP = ANY, DP = ANY, PROTO = ANY] => Drop
+# Prio = 1 (High): [SA = ANY, DA = 10.0.0.1, SP = ANY, DP = 1000, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.1.0.1, SP = ANY, DP = 1001, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.2.0.1, SP = ANY, DP = 1002, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.3.0.1, SP = ANY, DP = 1003, PROTO = UDP] => Allow
+p 2 firewall add default 1 #SINK0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.0.0.1 32 0 65535 1000 1000 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.1.0.1 32 0 65535 1001 1001 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.2.0.1 32 0 65535 1002 1002 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.3.0.1 32 0 65535 1003 1003 17 0xF port 0
+#p 2 firewall ls
+
+################################################################################
+# Local destination TCP traffic
+################################################################################
+# Unknown connection => Drop
+# TCP [SA = 100.0.0.10, DA = 10.0.0.1, SP = 1000, DP = 80] => socket ID = 0
+# TCP [SA = 100.1.0.10, DA = 10.1.0.1, SP = 1001, DP = 80] => socket ID = 1
+# TCP [SA = 100.2.0.10, DA = 10.2.0.1, SP = 1002, DP = 80] => socket ID = 2
+# TCP [SA = 100.3.0.10, DA = 10.3.0.1, SP = 1003, DP = 80] => socket ID = 3
+p 3 flow add default 1 #SINK1
+p 3 flow add ipv4 100.0.0.10 10.0.0.1 1000 80 6 port 1 id 0
+p 3 flow add ipv4 100.1.0.10 10.1.0.1 1001 80 6 port 1 id 1
+p 3 flow add ipv4 100.2.0.10 10.2.0.1 1002 80 6 port 1 id 2
+p 3 flow add ipv4 100.3.0.10 10.3.0.1 1003 80 6 port 1 id 3
+#p 3 flow ls
diff --git a/examples/ip_pipeline/config/pipeline-to-core-mapping.py b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
new file mode 100755
index 00000000..37b131c6
--- /dev/null
+++ b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
@@ -0,0 +1,936 @@
+#! /usr/bin/python2
+
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# This script maps the set of pipelines identified (MASTER pipelines are
+# ignored) from the input configuration file to the set of cores
+# provided as input argument and creates configuration files for each of
+# the mapping combinations.
+#
+
+from __future__ import print_function
+import sys
+import errno
+import os
+import re
+import array
+import itertools
+import argparse
+from collections import namedtuple
+
+# default values
+enable_stage0_traceout = 1
+enable_stage1_traceout = 1
+enable_stage2_traceout = 1
+
+enable_stage1_fileout = 1
+enable_stage2_fileout = 1
+
+Constants = namedtuple('Constants', ['MAX_CORES', 'MAX_PIPELINES'])
+constants = Constants(16, 64)
+
+# pattern for physical core
+pattern_phycore = '^(s|S)\d(c|C)[1-9][0-9]*$'
+reg_phycore = re.compile(pattern_phycore)
+
+
+def popcount(mask):
+ return bin(mask).count("1")
+
+
+def len2mask(length):
+ if (length == 0):
+ return 0
+
+ if (length > 64):
+ sys.exit('error: len2mask - length %i > 64. exiting' % length)
+
+ return int('1' * length, 2)
+
+
+def bitstring_write(n, n_bits):
+ tmpstr = ""
+ if (n_bits > 64):
+ return
+
+ i = n_bits - 1
+ while (i >= 0):
+ cond = (n & (1 << i))
+ if (cond):
+ print('1', end='')
+ tmpstr += '1'
+ else:
+ print('0', end='')
+ tmpstr += '0'
+ i -= 1
+ return tmpstr
+
+
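A few worked values for the helpers above, for orientation: popcount(0b1011) counts three set bits; len2mask(3) builds int('111', 2) = 7, the mask with the three lowest bits set; and bitstring_write(5, 4) prints the bits of 5 in a 4-bit field and returns the string '0101'.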
+class Cores0:
+
+ def __init__(self):
+ self.n_pipelines = 0
+
+
+class Cores1:
+
+ def __init__(self):
+ self.pipelines = 0
+ self.n_pipelines = 0
+
+
+class Cores2:
+
+ def __init__(self):
+ self.pipelines = 0
+ self.n_pipelines = 0
+ self.counter = 0
+ self.counter_max = 0
+ self.bitpos = array.array(
+ "L", itertools.repeat(0, constants.MAX_PIPELINES))
+
+
+class Context0:
+
+ def __init__(self):
+ self.cores = [Cores0() for i in range(0, constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.n_pipelines0 = 0
+ self.pos = 0
+ self.file_comment = ""
+ self.ctx1 = None
+ self.ctx2 = None
+
+ def stage0_print(self):
+ print('printing Context0 obj')
+ print('c0.cores(n_pipelines) = [ ', end='')
+ for cores_count in range(0, constants.MAX_CORES):
+ print(self.cores[cores_count].n_pipelines, end=' ')
+ print(']')
+ print('c0.n_cores = %d' % self.n_cores)
+ print('c0.n_pipelines = %d' % self.n_pipelines)
+ print('c0.n_pipelines0 = %d' % self.n_pipelines0)
+ print('c0.pos = %d' % self.pos)
+ print('c0.file_comment = %s' % self.file_comment)
+ if (self.ctx1 is not None):
+ print('c0.ctx1 = ', end='')
+ print(repr(self.ctx1))
+ else:
+ print('c0.ctx1 = None')
+
+ if (self.ctx2 is not None):
+ print('c0.ctx2 = ', end='')
+ print(repr(self.ctx2))
+ else:
+ print('c0.ctx2 = None')
+
+ def stage0_init(self, num_cores, num_pipelines, ctx1, ctx2):
+ self.n_cores = num_cores
+ self.n_pipelines = num_pipelines
+ self.ctx1 = ctx1
+ self.ctx2 = ctx2
+
+ def stage0_process(self):
+ # stage0 init
+ self.cores[0].n_pipelines = self.n_pipelines
+ self.n_pipelines0 = 0
+ self.pos = 1
+
+ while True:
+ # go forward
+ while True:
+ if ((self.pos < self.n_cores) and (self.n_pipelines0 > 0)):
+ self.cores[self.pos].n_pipelines = min(
+ self.cores[self.pos - 1].n_pipelines,
+ self.n_pipelines0)
+ self.n_pipelines0 -= self.cores[self.pos].n_pipelines
+ self.pos += 1
+ else:
+ break
+
+ # check solution
+ if (self.n_pipelines0 == 0):
+ self.stage0_log()
+ self.ctx1.stage1_init(self, self.ctx2) # self is object c0
+ self.ctx1.stage1_process()
+
+ # go backward
+ while True:
+ if (self.pos == 0):
+ return
+
+ self.pos -= 1
+ if ((self.cores[self.pos].n_pipelines > 1) and
+ (self.pos != (self.n_cores - 1))):
+ break
+
+ self.n_pipelines0 += self.cores[self.pos].n_pipelines
+ self.cores[self.pos].n_pipelines = 0
+
+ # rearm
+ self.cores[self.pos].n_pipelines -= 1
+ self.n_pipelines0 += 1
+ self.pos += 1
+
+ def stage0_log(self):
+ tmp_file_comment = ""
+ if(enable_stage0_traceout != 1):
+ return
+
+ print('STAGE0: ', end='')
+ tmp_file_comment += 'STAGE0: '
+ for cores_count in range(0, self.n_cores):
+ print('C%d = %d\t'
+ % (cores_count,
+ self.cores[cores_count].n_pipelines), end='')
+ tmp_file_comment += "C{} = {}\t".format(
+ cores_count, self.cores[cores_count].n_pipelines)
+ # end for
+ print('')
+ self.ctx1.stage0_file_comment = tmp_file_comment
+ self.ctx2.stage0_file_comment = tmp_file_comment
+
+
+class Context1:
+ _fileTrace = None
+
+ def __init__(self):
+ self.cores = [Cores1() for i in range(constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.stage0_file_comment = ""
+ self.stage1_file_comment = ""
+
+ self.ctx2 = None
+ self.arr_pipelines2cores = []
+
+ def stage1_reset(self):
+ for i in range(constants.MAX_CORES):
+ self.cores[i].pipelines = 0
+ self.cores[i].n_pipelines = 0
+
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.ctx2 = None
+ # clear list
+ del self.arr_pipelines2cores[:]
+
+ def stage1_print(self):
+ print('printing Context1 obj')
+ print('ctx1.cores(pipelines,n_pipelines) = [ ', end='')
+ for cores_count in range(0, constants.MAX_CORES):
+ print('(%d,%d)' % (self.cores[cores_count].pipelines,
+ self.cores[cores_count].n_pipelines), end=' ')
+ print(']')
+ print('ctx1.n_cores = %d' % self.n_cores)
+ print('ctx1.n_pipelines = %d' % self.n_pipelines)
+ print('ctx1.pos = %d' % self.pos)
+ print('ctx1.stage0_file_comment = %s' % self.stage0_file_comment)
+ print('ctx1.stage1_file_comment = %s' % self.stage1_file_comment)
+ if (self.ctx2 is not None):
+ print('ctx1.ctx2 = ', end='')
+ print(self.ctx2)
+ else:
+ print('ctx1.ctx2 = None')
+
+ def stage1_init(self, c0, ctx2):
+ self.stage1_reset()
+ self.n_cores = 0
+ while (c0.cores[self.n_cores].n_pipelines > 0):
+ self.n_cores += 1
+
+ self.n_pipelines = c0.n_pipelines
+ self.ctx2 = ctx2
+
+ self.arr_pipelines2cores = [0] * self.n_pipelines
+
+ i = 0
+ while (i < self.n_cores):
+ self.cores[i].n_pipelines = c0.cores[i].n_pipelines
+ i += 1
+
+ def stage1_process(self):
+ pipelines_max = len2mask(self.n_pipelines)
+ while True:
+ pos = 0
+ overlap = 0
+
+ if (self.cores[self.pos].pipelines == pipelines_max):
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].pipelines = 0
+ self.pos -= 1
+ continue
+
+ self.cores[self.pos].pipelines += 1
+ if (popcount(self.cores[self.pos].pipelines) !=
+ self.cores[self.pos].n_pipelines):
+ continue
+
+ overlap = 0
+ pos = 0
+ while (pos < self.pos):
+ if ((self.cores[self.pos].pipelines) &
+ (self.cores[pos].pipelines)):
+ overlap = 1
+ break
+ pos += 1
+
+ if (overlap):
+ continue
+
+ if ((self.pos > 0) and
+ ((self.cores[self.pos].n_pipelines) ==
+ (self.cores[self.pos - 1].n_pipelines)) and
+ ((self.cores[self.pos].pipelines) <
+ (self.cores[self.pos - 1].pipelines))):
+ continue
+
+ if (self.pos == self.n_cores - 1):
+ self.stage1_log()
+ self.ctx2.stage2_init(self)
+ self.ctx2.stage2_process()
+
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].pipelines = 0
+ self.pos -= 1
+ continue
+
+ self.pos += 1
+
+ def stage1_log(self):
+ tmp_file_comment = ""
+ if(enable_stage1_traceout == 1):
+ print('STAGE1: ', end='')
+ tmp_file_comment += 'STAGE1: '
+ i = 0
+ while (i < self.n_cores):
+ print('C%d = [' % i, end='')
+ tmp_file_comment += "C{} = [".format(i)
+
+ j = self.n_pipelines - 1
+ while (j >= 0):
+ cond = ((self.cores[i].pipelines) & (1 << j))
+ if (cond):
+ print('1', end='')
+ tmp_file_comment += '1'
+ else:
+ print('0', end='')
+ tmp_file_comment += '0'
+ j -= 1
+
+ print(']\t', end='')
+ tmp_file_comment += ']\t'
+ i += 1
+
+ print('\n', end='')
+ self.stage1_file_comment = tmp_file_comment
+ self.ctx2.stage1_file_comment = tmp_file_comment
+
+ # check if file tracing is enabled
+ if(enable_stage1_fileout != 1):
+ return
+
+ # spit out the combination to file
+ self.stage1_process_file()
+
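+ # A sketch of the substitution below, assuming in_buf holds config
+ # text such as "[PIPELINE1]\ncore = 1\n": calling with sCore="s0c2"
+ # rewrites the first "core = ..." entry after the matching pipeline
+ # section header to "core = s0c2".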
+ def stage1_updateCoresInBuf(self, nPipeline, sCore):
+ rePipeline = self._fileTrace.arr_pipelines[nPipeline]
+ rePipeline = rePipeline.replace("[", "\\[").replace("]", "\\]")
+ reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
+ sSubs = 'core = ' + sCore + '\n'
+
+ reg_pipeline = re.compile(rePipeline)
+ search_match = reg_pipeline.search(self._fileTrace.in_buf)
+
+ if(search_match):
+ pos = search_match.start()
+ substr1 = self._fileTrace.in_buf[:pos]
+ substr2 = self._fileTrace.in_buf[pos:]
+ substr2 = re.sub(reCore, sSubs, substr2, 1)
+ self._fileTrace.in_buf = substr1 + substr2
+
+ def stage1_process_file(self):
+ outFileName = os.path.join(self._fileTrace.out_path,
+ self._fileTrace.prefix_outfile)
+ outFileName += "_{}CoReS".format(self.n_cores)
+
+ i = 0 # represents core number
+ while (i < self.n_cores):
+ j = self.n_pipelines - 1
+ pipeline_idx = 0
+ while(j >= 0):
+ cond = ((self.cores[i].pipelines) & (1 << j))
+ if (cond):
+ # update the pipelines array to match the core
+ # only in case of cond match
+ self.arr_pipelines2cores[
+ pipeline_idx] = self._fileTrace.in_physical_cores[i]
+
+ j -= 1
+ pipeline_idx += 1
+
+ i += 1
+
+ # update the in_buf as per the arr_pipelines2cores
+ for pipeline_idx in range(len(self.arr_pipelines2cores)):
+ outFileName += "_{}".format(self.arr_pipelines2cores[pipeline_idx])
+ self.stage1_updateCoresInBuf(
+ pipeline_idx, self.arr_pipelines2cores[pipeline_idx])
+
+ # by now the in_buf is all set to be written to file
+ outFileName += self._fileTrace.suffix_outfile
+ outputFile = open(outFileName, "w")
+
+ # write out the comments
+ strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
+ outputFile.write(
+ "; =============== Pipeline-to-Core Mapping ================\n"
+ "; Generated from file {}\n"
+ "; Input pipelines = {}\n"
+ "; Input cores = {}\n"
+ "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {}\n"
+ .format(
+ self._fileTrace.in_file_namepath,
+ self._fileTrace.arr_pipelines,
+ self._fileTrace.in_physical_cores,
+ self._fileTrace.n_pipelines,
+ self._fileTrace.n_cores,
+ strTruncated,
+ self._fileTrace.hyper_thread))
+
+ outputFile.write(
+ "; {stg0cmt}\n"
+ "; {stg1cmt}\n"
+ "; ========================================================\n"
+ "; \n"
+ .format(
+ stg0cmt=self.stage0_file_comment,
+ stg1cmt=self.stage1_file_comment))
+
+ # write buffer contents
+ outputFile.write(self._fileTrace.in_buf)
+ outputFile.flush()
+ outputFile.close()
+
+
+class Context2:
+ _fileTrace = None
+
+ def __init__(self):
+ self.cores = [Cores2() for i in range(constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.stage0_file_comment = ""
+ self.stage1_file_comment = ""
+ self.stage2_file_comment = ""
+
+ # each array entry is a pipeline mapped to core stored as string
+ # pipeline ranging from 1 to n, however stored in zero based array
+ self.arr2_pipelines2cores = []
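+ # e.g. ['s0c1', 's0c1h', 's0c2'] maps PIPELINE1 -> s0c1,
+ # PIPELINE2 -> the HT sibling s0c1h and PIPELINE3 -> s0c2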
+
+ def stage2_print(self):
+ print('printing Context2 obj')
+ print('ctx2.cores(pipelines, n_pipelines, counter, counter_max) =')
+ for cores_count in range(0, constants.MAX_CORES):
+ print('core[%d] = (%d,%d,%d,%d)' % (
+ cores_count,
+ self.cores[cores_count].pipelines,
+ self.cores[cores_count].n_pipelines,
+ self.cores[cores_count].counter,
+ self.cores[cores_count].counter_max))
+
+ print('ctx2.n_cores = %d' % self.n_cores)
+ print('ctx2.n_pipelines = %d' % self.n_pipelines)
+ print('ctx2.pos = %d' % self.pos)
+ print('ctx2.stage0_file_comment = %s' %
+ self.stage0_file_comment)
+ print('ctx2.stage1_file_comment = %s' %
+ self.stage1_file_comment)
+ print('ctx2.stage2_file_comment = %s' %
+ self.stage2_file_comment)
+
+ def stage2_reset(self):
+ for i in range(0, constants.MAX_CORES):
+ self.cores[i].pipelines = 0
+ self.cores[i].n_pipelines = 0
+ self.cores[i].counter = 0
+ self.cores[i].counter_max = 0
+
+ for idx in range(0, constants.MAX_PIPELINES):
+ self.cores[i].bitpos[idx] = 0
+
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ # clear list
+ del self.arr2_pipelines2cores[:]
+
+ def bitpos_load(self, coreidx):
+ i = j = 0
+ while (i < self.n_pipelines):
+ if ((self.cores[coreidx].pipelines) &
+ (1 << i)):
+ self.cores[coreidx].bitpos[j] = i
+ j += 1
+ i += 1
+ self.cores[coreidx].n_pipelines = j
+
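+ # A worked example of the scatter below: with pos=[1, 3] and
+ # n_pos=2, bit 0 of in_buf goes to bit 1 and bit 1 goes to bit 3,
+ # so bitpos_apply(0b11, [1, 3], 2) == 0b1010.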
+ def bitpos_apply(self, in_buf, pos, n_pos):
+ out = 0
+ for i in range(0, n_pos):
+ out |= (in_buf & (1 << i)) << (pos[i] - i)
+
+ return out
+
+ def stage2_init(self, ctx1):
+ self.stage2_reset()
+ self.n_cores = ctx1.n_cores
+ self.n_pipelines = ctx1.n_pipelines
+
+ self.arr2_pipelines2cores = [''] * self.n_pipelines
+
+ core_idx = 0
+ while (core_idx < self.n_cores):
+ self.cores[core_idx].pipelines = ctx1.cores[core_idx].pipelines
+
+ self.bitpos_load(core_idx)
+ core_idx += 1
+
+ def stage2_log(self):
+ tmp_file_comment = ""
+ if(enable_stage2_traceout == 1):
+ print('STAGE2: ', end='')
+ tmp_file_comment += 'STAGE2: '
+
+ for i in range(0, self.n_cores):
+ mask = len2mask(self.cores[i].n_pipelines)
+ pipelines_ht0 = self.bitpos_apply(
+ (~self.cores[i].counter) & mask,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ pipelines_ht1 = self.bitpos_apply(
+ self.cores[i].counter,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ print('C%dHT0 = [' % i, end='')
+ tmp_file_comment += "C{}HT0 = [".format(i)
+ tmp_file_comment += bitstring_write(
+ pipelines_ht0, self.n_pipelines)
+
+ print(']\tC%dHT1 = [' % i, end='')
+ tmp_file_comment += "]\tC{}HT1 = [".format(i)
+ tmp_file_comment += bitstring_write(
+ pipelines_ht1, self.n_pipelines)
+ print(']\t', end='')
+ tmp_file_comment += ']\t'
+
+ print('')
+ self.stage2_file_comment = tmp_file_comment
+
+ # check if file tracing is enabled
+ if(enable_stage2_fileout != 1):
+ return
+ # spit out the combination to file
+ self.stage2_process_file()
+
+ def stage2_updateCoresInBuf(self, nPipeline, sCore):
+ rePipeline = self._fileTrace.arr_pipelines[nPipeline]
+ rePipeline = rePipeline.replace("[", "\\[").replace("]", "\\]")
+ reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
+ sSubs = 'core = ' + sCore + '\n'
+
+ reg_pipeline = re.compile(rePipeline)
+ search_match = reg_pipeline.search(self._fileTrace.in_buf)
+
+ if(search_match):
+ pos = search_match.start()
+ substr1 = self._fileTrace.in_buf[:pos]
+ substr2 = self._fileTrace.in_buf[pos:]
+ substr2 = re.sub(reCore, sSubs, substr2, 1)
+ self._fileTrace.in_buf = substr1 + substr2
+
+ def pipelines2cores(self, n, n_bits, nCore, bHT):
+ if (n_bits > 64):
+ return
+
+ i = n_bits - 1
+ pipeline_idx = 0
+ while (i >= 0):
+ cond = (n & (1 << i))
+ if (cond):
+ # update the pipelines array to match the core
+ # only in case of cond match
+ # PIPELINE0 and core 0 are reserved
+ if(bHT):
+ tmpCore = self._fileTrace.in_physical_cores[nCore] + 'h'
+ self.arr2_pipelines2cores[pipeline_idx] = tmpCore
+ else:
+ self.arr2_pipelines2cores[pipeline_idx] = \
+ self._fileTrace.in_physical_cores[nCore]
+
+ i -= 1
+ pipeline_idx += 1
+
+ def stage2_process_file(self):
+ outFileName = os.path.join(self._fileTrace.out_path,
+ self._fileTrace.prefix_outfile)
+ outFileName += "_{}CoReS".format(self.n_cores)
+
+ for i in range(0, self.n_cores):
+ mask = len2mask(self.cores[i].n_pipelines)
+ pipelines_ht0 = self.bitpos_apply((~self.cores[i].counter) & mask,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ pipelines_ht1 = self.bitpos_apply(self.cores[i].counter,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ # update pipelines to core mapping
+ self.pipelines2cores(pipelines_ht0, self.n_pipelines, i, False)
+ self.pipelines2cores(pipelines_ht1, self.n_pipelines, i, True)
+
+ # update the in_buf as per the arr_pipelines2cores
+ for pipeline_idx in range(len(self.arr2_pipelines2cores)):
+ outFileName += "_{}".format(
+ self.arr2_pipelines2cores[pipeline_idx])
+ self.stage2_updateCoresInBuf(
+ pipeline_idx, self.arr2_pipelines2cores[pipeline_idx])
+
+ # by now the in_buf is all set to be written to file
+ outFileName += self._fileTrace.suffix_outfile
+ outputFile = open(outFileName, "w")
+
+ # write the file comments
+ strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
+ outputFile.write(
+ "; =============== Pipeline-to-Core Mapping ================\n"
+ "; Generated from file {}\n"
+ "; Input pipelines = {}\n"
+ "; Input cores = {}\n"
+ "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {} \n"
+ .format(
+ self._fileTrace.in_file_namepath,
+ self._fileTrace.arr_pipelines,
+ self._fileTrace.in_physical_cores,
+ self._fileTrace.n_pipelines,
+ self._fileTrace.n_cores,
+ strTruncated,
+ self._fileTrace.hyper_thread))
+
+ outputFile.write(
+ "; {stg0cmt}\n"
+ "; {stg1cmt}\n"
+ "; {stg2cmt}\n"
+ "; ========================================================\n"
+ "; \n"
+ .format(
+ stg0cmt=self.stage0_file_comment,
+ stg1cmt=self.stage1_file_comment,
+ stg2cmt=self.stage2_file_comment))
+
+ # write the buffer contents
+ outputFile.write(self._fileTrace.in_buf)
+ outputFile.flush()
+ outputFile.close()
+
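+ # stage2 splits each core's pipelines across its two hyper-threads:
+ # counter is a bitmask over that core's pipelines (bit set = HT1),
+ # swept from 0 to counter_max = len2mask(n_pipelines - 1). Sweeping
+ # only half of the 2^n subsets suffices, since a split and its
+ # complement yield mirror-image HT0/HT1 assignments.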
+ def stage2_process(self):
+ i = 0
+ while(i < self.n_cores):
+ self.cores[i].counter_max = len2mask(
+ self.cores[i].n_pipelines - 1)
+ i += 1
+
+ self.pos = self.n_cores - 1
+ while True:
+ if (self.pos == self.n_cores - 1):
+ self.stage2_log()
+
+ if (self.cores[self.pos].counter ==
+ self.cores[self.pos].counter_max):
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].counter = 0
+ self.pos -= 1
+ continue
+
+ self.cores[self.pos].counter += 1
+ if(self.pos < self.n_cores - 1):
+ self.pos += 1
+
+
+class FileTrace:
+
+ def __init__(self, filenamepath):
+ self.in_file_namepath = os.path.abspath(filenamepath)
+ self.in_filename = os.path.basename(self.in_file_namepath)
+ self.in_path = os.path.dirname(self.in_file_namepath)
+
+ filenamesplit = self.in_filename.split('.')
+ self.prefix_outfile = filenamesplit[0]
+ self.suffix_outfile = ".cfg"
+
+ # output folder: a new folder named after the input file,
+ # created next to this script
+ self.out_path = os.path.join(
+ os.path.abspath(os.path.dirname(__file__)),
+ self.prefix_outfile)
+
+ try:
+ os.makedirs(self.out_path)
+ except OSError as excep:
+ if excep.errno == errno.EEXIST and os.path.isdir(self.out_path):
+ pass
+ else:
+ raise
+
+ self.in_buf = None
+ self.arr_pipelines = [] # pipeline section names found by the scan
+
+ self.max_cores = 15
+ self.max_pipelines = 15
+
+ self.in_physical_cores = None
+ self.hyper_thread = None
+
+ # save the num of pipelines determined from input file
+ self.n_pipelines = 0
+ # save the num of cores input (or the truncated value)
+ self.n_cores = 0
+ self.ncores_truncated = False
+
+ def print_TraceFile(self):
+ print("self.in_file_namepath = ", self.in_file_namepath)
+ print("self.in_filename = ", self.in_filename)
+ print("self.in_path = ", self.in_path)
+ print("self.out_path = ", self.out_path)
+ print("self.prefix_outfile = ", self.prefix_outfile)
+ print("self.suffix_outfile = ", self.suffix_outfile)
+ print("self.in_buf = ", self.in_buf)
+ print("self.arr_pipelines =", self.arr_pipelines)
+ print("self.in_physical_cores", self.in_physical_cores)
+ print("self.hyper_thread", self.hyper_thread)
+
+
+def process(n_cores, n_pipelines, fileTrace):
+ '''process and map pipelines, cores.'''
+ if (n_cores == 0):
+ sys.exit('N_CORES is 0, exiting')
+
+ if (n_pipelines == 0):
+ sys.exit('N_PIPELINES is 0, exiting')
+
+ if (n_cores > n_pipelines):
+ print('\nToo many cores, truncating N_CORES to N_PIPELINES')
+ n_cores = n_pipelines
+ fileTrace.ncores_truncated = True
+
+ fileTrace.n_pipelines = n_pipelines
+ fileTrace.n_cores = n_cores
+
+ strTruncated = ("", "(Truncated)")[fileTrace.ncores_truncated]
+ print("N_PIPELINES = {}, N_CORES = {} {}"
+ .format(n_pipelines, n_cores, strTruncated))
+ print("---------------------------------------------------------------")
+
+ ctx0_inst = Context0()
+ ctx1_inst = Context1()
+ ctx2_inst = Context2()
+
+ # initialize the class variables
+ ctx1_inst._fileTrace = fileTrace
+ ctx2_inst._fileTrace = fileTrace
+
+ ctx0_inst.stage0_init(n_cores, n_pipelines, ctx1_inst, ctx2_inst)
+ ctx0_inst.stage0_process()
+
+
+def validate_core(core):
+ '''validate a single physical core string against reg_phycore.'''
+ return reg_phycore.match(core) is not None
+
+
+def validate_phycores(phy_cores):
+ '''validate physical cores, check if unique.'''
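+ # e.g. validate_phycores("s0c1,s0c2,s1c1") returns
+ # ['s0c1', 's0c2', 's1c1']; duplicates or tokens rejected by
+ # validate_core() return None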
+ # strip surrounding whitespace and split on commas
+ phy_cores = phy_cores.strip().split(',')
+
+ # check if the core list is unique
+ if(len(phy_cores) != len(set(phy_cores))):
+ print('list of physical cores has duplicates')
+ return None
+
+ for core in phy_cores:
+ if not validate_core(core):
+ print('invalid physical core specified.')
+ return None
+ return phy_cores
+
+
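+ # A minimal input sketch for the scan below (hypothetical file):
+ # [PIPELINE0]
+ # type = MASTER ; master pipelines are skipped
+ # [PIPELINE1]
+ # type = PASS-THROUGH ; collected into arr_pipelines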
+def scanconfigfile(fileTrace):
+ '''scan input file for pipelines, validate then process.'''
+ # open file
+ filetoscan = open(fileTrace.in_file_namepath, 'r')
+ fileTrace.in_buf = filetoscan.read()
+
+ # reset iterator on open file
+ filetoscan.seek(0)
+
+ # scan the input file for pipeline sections;
+ # MASTER pipelines are ignored
+ pattern_pipeline = r'\[PIPELINE\d*\]'
+ pattern_mastertype = r'type\s*=\s*MASTER'
+
+ pending_pipeline = False
+ for line in filetoscan:
+ match_pipeline = re.search(pattern_pipeline, line)
+ match_type = re.search(r'type\s*=', line)
+ match_mastertype = re.search(pattern_mastertype, line)
+
+ if(match_pipeline):
+ sPipeline = line[match_pipeline.start():match_pipeline.end()]
+ pending_pipeline = True
+ elif(match_type):
+ # found a type definition...
+ if(match_mastertype is None):
+ # and this is not a master pipeline...
+ if(pending_pipeline):
+ # add it to the list of pipelines to be mapped
+ fileTrace.arr_pipelines.append(sPipeline)
+ pending_pipeline = False
+ else:
+ # and this is a master pipeline...
+ # ignore the current and move on to next
+ sPipeline = ""
+ pending_pipeline = False
+ filetoscan.close()
+
+ # validate if pipelines are unique
+ if(len(fileTrace.arr_pipelines) != len(set(fileTrace.arr_pipelines))):
+ sys.exit('Error: duplicate pipelines in input file')
+
+ num_pipelines = len(fileTrace.arr_pipelines)
+ num_cores = len(fileTrace.in_physical_cores)
+
+ print("-------------------Pipeline-to-core mapping--------------------")
+ print("Input pipelines = {}\nInput cores = {}"
+ .format(fileTrace.arr_pipelines, fileTrace.in_physical_cores))
+
+ # input configuration file validations goes here
+ if (num_cores > fileTrace.max_cores):
+ sys.exit('Error: number of cores specified > max_cores (%d)' %
+ fileTrace.max_cores)
+
+ if (num_pipelines > fileTrace.max_pipelines):
+ sys.exit('Error: number of pipelines in input '
+ 'cfg file > max_pipelines (%d)' % fileTrace.max_pipelines)
+
+ # call process to generate pipeline-to-core mapping, trace and log
+ process(num_cores, num_pipelines, fileTrace)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='mappipelines')
+
+ reqNamedGrp = parser.add_argument_group('required named args')
+ reqNamedGrp.add_argument(
+ '-i',
+ '--input-file',
+ type=argparse.FileType('r'),
+ help='Input config file',
+ required=True)
+
+ reqNamedGrp.add_argument(
+ '-pc',
+ '--physical-cores',
+ type=validate_phycores,
+ help='''Enter available CPU cores in
+ format:\"<core>,<core>,...\"
+ where each core format: \"s<SOCKETID>c<COREID>\"
+ where SOCKETID={0..9}, COREID={1..99}''',
+ required=True)
+
+ # add optional arguments
+ parser.add_argument(
+ '-ht',
+ '--hyper-thread',
+ help='enable/disable hyper threading. default is ON',
+ default='ON',
+ choices=['ON', 'OFF'])
+
+ parser.add_argument(
+ '-nO',
+ '--no-output-file',
+ help='''disable output config file generation.
+ Output file generation is enabled by default''',
+ action="store_true")
+
+ args = parser.parse_args()
+
+ if(args.physical_cores is None):
+ parser.error("invalid physical_cores specified")
+
+ # create object of FileTrace and initialise
+ fileTrace = FileTrace(args.input_file.name)
+ fileTrace.in_physical_cores = args.physical_cores
+ fileTrace.hyper_thread = args.hyper_thread
+
+ if(fileTrace.hyper_thread == 'OFF'):
+ print("!!!!disabling stage2 HT!!!!")
+ enable_stage2_traceout = 0
+ enable_stage2_fileout = 0
+ elif(fileTrace.hyper_thread == 'ON'):
+ print("!!!!HT enabled. disabling stage1 file generation.!!!!")
+ enable_stage1_fileout = 0
+
+ if(args.no_output_file is True):
+ print("!!!!disabling stage1 and stage2 fileout!!!!")
+ enable_stage1_fileout = 0
+ enable_stage2_fileout = 0
+
+ scanconfigfile(fileTrace)
diff --git a/examples/ip_pipeline/config_check.c b/examples/ip_pipeline/config_check.c
index fd9ff495..af1b6284 100644
--- a/examples/ip_pipeline/config_check.c
+++ b/examples/ip_pipeline/config_check.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,6 +56,26 @@ check_mempools(struct app_params *app)
}
}
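+/*
+ * Return 1 when RX queue q_id is referenced by the link, either as one
+ * of its special queues (ARP, TCP SYN, local IP/TCP/UDP/SCTP) or as
+ * one of its RSS queues; e.g. with arp_q = 1 and rss_qs = {2, 3},
+ * q_id = 3 is reported as used.
+ */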
+static inline uint32_t
+link_rxq_used(struct app_link_params *link, uint32_t q_id)
+{
+ uint32_t i;
+
+ if ((link->arp_q == q_id) ||
+ (link->tcp_syn_q == q_id) ||
+ (link->ip_local_q == q_id) ||
+ (link->tcp_local_q == q_id) ||
+ (link->udp_local_q == q_id) ||
+ (link->sctp_local_q == q_id))
+ return 1;
+
+ for (i = 0; i < link->n_rss_qs; i++)
+ if (link->rss_qs[i] == q_id)
+ return 1;
+
+ return 0;
+}
+
static void
check_links(struct app_params *app)
{
@@ -90,14 +110,12 @@ check_links(struct app_params *app)
rxq_max = link->udp_local_q;
if (link->sctp_local_q > rxq_max)
rxq_max = link->sctp_local_q;
+ for (i = 0; i < link->n_rss_qs; i++)
+ if (link->rss_qs[i] > rxq_max)
+ rxq_max = link->rss_qs[i];
for (i = 1; i <= rxq_max; i++)
- APP_CHECK(((link->arp_q == i) ||
- (link->tcp_syn_q == i) ||
- (link->ip_local_q == i) ||
- (link->tcp_local_q == i) ||
- (link->udp_local_q == i) ||
- (link->sctp_local_q == i)),
+ APP_CHECK((link_rxq_used(link, i)),
"%s RXQs are not contiguous (A)\n", link->name);
n_rxq = app_link_get_n_rxq(app, link);
@@ -118,7 +136,7 @@ check_links(struct app_params *app)
"%s RXQs are not contiguous (C)\n", link->name);
}
- /* Check that link RXQs are contiguous */
+ /* Check that link TXQs are contiguous */
n_txq = app_link_get_n_txq(app, link);
APP_CHECK((n_txq), "%s does not have any TXQ\n", link->name);
@@ -298,6 +316,29 @@ check_tms(struct app_params *app)
}
static void
+check_knis(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_kni; i++) {
+ struct app_pktq_kni_params *p = &app->kni_params[i];
+ uint32_t n_readers = app_kni_get_readers(app, p);
+ uint32_t n_writers = app_kni_get_writers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
check_sources(struct app_params *app)
{
uint32_t i;
@@ -435,6 +476,7 @@ app_config_check(struct app_params *app)
check_txqs(app);
check_swqs(app);
check_tms(app);
+ check_knis(app);
check_sources(app);
check_sinks(app);
check_msgqs(app);
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
index e5efd03e..0adca98f 100644
--- a/examples/ip_pipeline/config_parse.c
+++ b/examples/ip_pipeline/config_parse.c
@@ -1,4 +1,4 @@
-/*-
+/*-
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
@@ -30,6 +30,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
@@ -80,6 +81,11 @@ static const struct app_link_params link_params_default = {
.tcp_local_q = 0,
.udp_local_q = 0,
.sctp_local_q = 0,
+ .rss_qs = {0},
+ .n_rss_qs = 0,
+ .rss_proto_ipv4 = ETH_RSS_IPV4,
+ .rss_proto_ipv6 = ETH_RSS_IPV6,
+ .rss_proto_l2 = 0,
.state = 0,
.ip = 0,
.depth = 0,
@@ -103,6 +109,13 @@ static const struct app_link_params link_params_default = {
.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
.split_hdr_size = 0, /* Header split buffer size */
},
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_key_len = 40,
+ .rss_hf = 0,
+ },
+ },
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
},
@@ -176,6 +189,20 @@ struct app_pktq_tm_params default_tm_params = {
.burst_write = 32,
};
+struct app_pktq_kni_params default_kni_params = {
+ .parsed = 0,
+ .socket_id = 0,
+ .core_id = 0,
+ .hyper_th_id = 0,
+ .force_bind = 0,
+
+ .mempool_id = 0,
+ .burst_read = 32,
+ .burst_write = 32,
+ .dropless = 0,
+ .n_retries = 0,
+};
+
struct app_pktq_source_params default_source_params = {
.parsed = 0,
.mempool_id = 0,
@@ -229,306 +256,136 @@ app_print_usage(char *prgname)
rte_exit(0, app_usage, prgname, app_params_default.config_file);
}
-#define skip_white_spaces(pos) \
-({ \
- __typeof__(pos) _p = (pos); \
- for ( ; isspace(*_p); _p++); \
- _p; \
+#define APP_PARAM_ADD(set, key) \
+({ \
+ ssize_t pos = APP_PARAM_FIND(set, key); \
+ ssize_t size = RTE_DIM(set); \
+ \
+ if (pos < 0) { \
+ for (pos = 0; pos < size; pos++) { \
+ if (!APP_PARAM_VALID(&((set)[pos]))) \
+ break; \
+ } \
+ \
+ APP_CHECK((pos < size), \
+ "Parse error: size of %s is limited to %u elements",\
+ #set, (uint32_t) size); \
+ \
+ (set)[pos].name = strdup(key); \
+ APP_CHECK(((set)[pos].name), \
+ "Parse error: no free memory"); \
+ } \
+ pos; \
})
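+
+/*
+ * Find-or-add semantics: APP_PARAM_ADD() evaluates to the index of key
+ * in set; when key is absent it claims the first free slot and
+ * strdup()s the name, e.g.:
+ *     pos = APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
+ */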
-#define PARSER_PARAM_ADD_CHECK(result, params_array, section_name) \
-do { \
- APP_CHECK((result != -EINVAL), \
- "Parse error: no free memory"); \
- APP_CHECK((result != -ENOMEM), \
- "Parse error: too many \"%s\" sections", section_name); \
- APP_CHECK(((result >= 0) && (params_array)[result].parsed == 0),\
- "Parse error: duplicate \"%s\" section", section_name); \
- APP_CHECK((result >= 0), \
- "Parse error in section \"%s\"", section_name); \
-} while (0)
-
-int
-parser_read_arg_bool(const char *p)
-{
- p = skip_white_spaces(p);
- int result = -EINVAL;
-
- if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
- ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
- p += 3;
- result = 1;
- }
-
- if (((p[0] == 'o') && (p[1] == 'n')) ||
- ((p[0] == 'O') && (p[1] == 'N'))) {
- p += 2;
- result = 1;
- }
+#define APP_PARAM_ADD_LINK_FOR_RXQ(app, rxq_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id, queue_id; \
+ \
+ sscanf((rxq_name), "RXQ%" SCNu32 ".%" SCNu32, &link_id, &queue_id);\
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
- if (((p[0] == 'n') && (p[1] == 'o')) ||
- ((p[0] == 'N') && (p[1] == 'O'))) {
- p += 2;
- result = 0;
- }
+#define APP_PARAM_ADD_LINK_FOR_TXQ(app, txq_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id, queue_id; \
+ \
+ sscanf((txq_name), "TXQ%" SCNu32 ".%" SCNu32, &link_id, &queue_id);\
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
- if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
- ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
- p += 3;
- result = 0;
- }
+#define APP_PARAM_ADD_LINK_FOR_TM(app, tm_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id; \
+ \
+ sscanf((tm_name), "TM%" SCNu32, &link_id); \
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
- p = skip_white_spaces(p);
+#define APP_PARAM_ADD_LINK_FOR_KNI(app, kni_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id; \
+ \
+ sscanf((kni_name), "KNI%" SCNu32, &link_id); \
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
- if (p[0] != '\0')
- return -EINVAL;
+#define PARSE_CHECK_DUPLICATE_SECTION(obj) \
+do { \
+ APP_CHECK(((obj)->parsed == 0), \
+ "Parse error: duplicate \"%s\" section", (obj)->name); \
+ (obj)->parsed++; \
+} while (0)
- return result;
-}
+#define PARSE_CHECK_DUPLICATE_SECTION_EAL(obj) \
+do { \
+ APP_CHECK(((obj)->parsed == 0), \
+ "Parse error: duplicate \"%s\" section", "EAL"); \
+ (obj)->parsed++; \
+} while (0)
#define PARSE_ERROR(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": entry \"%s\"\n", section, entry)
+APP_CHECK(exp, "Parse error in section \"%s\": entry \"%s\"", section, entry)
#define PARSE_ERROR_MESSAGE(exp, section, entry, message) \
-APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": %s\n", \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": %s", \
section, entry, message)
+#define PARSE_ERROR_NO_ELEMENTS(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "no elements detected", \
+ section, entry)
+
+#define PARSE_ERROR_TOO_MANY_ELEMENTS(exp, section, entry, max) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "maximum number of elements allowed is %u", \
+ section, entry, max)
+
+#define PARSE_ERROR_INVALID_ELEMENT(exp, section, entry, value) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "Invalid element value \"%s\"", \
+ section, entry, value)
#define PARSE_ERROR_MALLOC(exp) \
-APP_CHECK(exp, "Parse error: no free memory\n")
+APP_CHECK(exp, "Parse error: no free memory")
#define PARSE_ERROR_SECTION(exp, section) \
APP_CHECK(exp, "Parse error in section \"%s\"", section)
#define PARSE_ERROR_SECTION_NO_ENTRIES(exp, section) \
-APP_CHECK(exp, "Parse error in section \"%s\": no entries\n", section)
+APP_CHECK(exp, "Parse error in section \"%s\": no entries", section)
#define PARSE_WARNING_IGNORED(exp, section, entry) \
do \
if (!(exp)) \
fprintf(stderr, "Parse warning in section \"%s\": " \
- "entry \"%s\" is ignored\n", section, entry); \
+ "entry \"%s\" is ignored", section, entry); \
while (0)
#define PARSE_ERROR_INVALID(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": unrecognized entry \"%s\"\n",\
+APP_CHECK(exp, "Parse error in section \"%s\": unrecognized entry \"%s\"",\
section, entry)
#define PARSE_ERROR_DUPLICATE(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": duplicate entry \"%s\"\n",\
+APP_CHECK(exp, "Parse error in section \"%s\": duplicate entry \"%s\"", \
section, entry)
-int
-parser_read_uint64(uint64_t *value, const char *p)
-{
- char *next;
- uint64_t val;
-
- p = skip_white_spaces(p);
- if (!isdigit(*p))
- return -EINVAL;
-
- val = strtoul(p, &next, 10);
- if (p == next)
- return -EINVAL;
-
- p = next;
- switch (*p) {
- case 'T':
- val *= 1024ULL;
- /* fall through */
- case 'G':
- val *= 1024ULL;
- /* fall through */
- case 'M':
- val *= 1024ULL;
- /* fall through */
- case 'k':
- case 'K':
- val *= 1024ULL;
- p++;
- break;
- }
-
- p = skip_white_spaces(p);
- if (*p != '\0')
- return -EINVAL;
-
- *value = val;
- return 0;
-}
-
-int
-parser_read_uint32(uint32_t *value, const char *p)
-{
- uint64_t val = 0;
- int ret = parser_read_uint64(&val, p);
-
- if (ret < 0)
- return ret;
-
- if (val > UINT32_MAX)
- return -ERANGE;
-
- *value = val;
- return 0;
-}
-
-int
-parse_pipeline_core(uint32_t *socket,
- uint32_t *core,
- uint32_t *ht,
- const char *entry)
-{
- size_t num_len;
- char num[8];
-
- uint32_t s = 0, c = 0, h = 0, val;
- uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
- const char *next = skip_white_spaces(entry);
- char type;
-
- /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
- while (*next != '\0') {
- /* If everything parsed nothing should left */
- if (s_parsed && c_parsed && h_parsed)
- return -EINVAL;
-
- type = *next;
- switch (type) {
- case 's':
- case 'S':
- if (s_parsed || c_parsed || h_parsed)
- return -EINVAL;
- s_parsed = 1;
- next++;
- break;
- case 'c':
- case 'C':
- if (c_parsed || h_parsed)
- return -EINVAL;
- c_parsed = 1;
- next++;
- break;
- case 'h':
- case 'H':
- if (h_parsed)
- return -EINVAL;
- h_parsed = 1;
- next++;
- break;
- default:
- /* If it start from digit it must be only core id. */
- if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
- return -EINVAL;
-
- type = 'C';
- }
-
- for (num_len = 0; *next != '\0'; next++, num_len++) {
- if (num_len == RTE_DIM(num))
- return -EINVAL;
-
- if (!isdigit(*next))
- break;
-
- num[num_len] = *next;
- }
-
- if (num_len == 0 && type != 'h' && type != 'H')
- return -EINVAL;
-
- if (num_len != 0 && (type == 'h' || type == 'H'))
- return -EINVAL;
-
- num[num_len] = '\0';
- val = strtol(num, NULL, 10);
-
- h = 0;
- switch (type) {
- case 's':
- case 'S':
- s = val;
- break;
- case 'c':
- case 'C':
- c = val;
- break;
- case 'h':
- case 'H':
- h = 1;
- break;
- }
- }
-
- *socket = s;
- *core = c;
- *ht = h;
- return 0;
-}
-
-static uint32_t
-get_hex_val(char c)
-{
- switch (c) {
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9':
- return c - '0';
- case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
- return c - 'A' + 10;
- case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
- return c - 'a' + 10;
- default:
- return 0;
- }
-}
-
-int
-parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
-{
- char *c;
- uint32_t len, i;
-
- /* Check input parameters */
- if ((src == NULL) ||
- (dst == NULL) ||
- (size == NULL) ||
- (*size == 0))
- return -1;
-
- len = strlen(src);
- if (((len & 3) != 0) ||
- (len > (*size) * 2))
- return -1;
- *size = len / 2;
-
- for (c = src; *c != 0; c++) {
- if ((((*c) >= '0') && ((*c) <= '9')) ||
- (((*c) >= 'A') && ((*c) <= 'F')) ||
- (((*c) >= 'a') && ((*c) <= 'f')))
- continue;
-
- return -1;
- }
-
- /* Convert chars to bytes */
- for (i = 0; i < *size; i++)
- dst[i] = get_hex_val(src[2 * i]) * 16 +
- get_hex_val(src[2 * i + 1]);
-
- return 0;
-}
-
-static size_t
-skip_digits(const char *src)
-{
- size_t i;
-
- for (i = 0; isdigit(src[i]); i++);
-
- return i;
-}
-
static int
validate_name(const char *name, const char *prefix, int num)
{
@@ -584,6 +441,8 @@ parse_eal(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+ PARSE_CHECK_DUPLICATE_SECTION_EAL(p);
+
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *entry = &entries[i];
@@ -963,413 +822,162 @@ parse_eal(struct app_params *app,
free(entries);
}
-static int
-parse_pipeline_pcap_source(struct app_params *app,
- struct app_pipeline_params *p,
- const char *file_name, const char *cp_size)
-{
- const char *next = NULL;
- char *end;
- uint32_t i;
- int parse_file = 0;
-
- if (file_name && !cp_size) {
- next = file_name;
- parse_file = 1; /* parse file path */
- } else if (cp_size && !file_name) {
- next = cp_size;
- parse_file = 0; /* parse copy size */
- } else
- return -EINVAL;
-
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
-
- if (p->n_pktq_in == 0)
- return -EINVAL;
-
- i = 0;
- while (*next != '\0') {
- uint32_t id;
-
- if (i >= p->n_pktq_in)
- return -EINVAL;
-
- id = p->pktq_in[i].id;
-
- end = strchr(next, ' ');
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
-
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
-
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
-
- if (parse_file) {
- app->source_params[id].file_name = strdup(name);
- if (app->source_params[id].file_name == NULL)
- return -ENOMEM;
- } else {
- if (parser_read_uint32(
- &app->source_params[id].n_bytes_per_pkt,
- name) != 0) {
- if (app->source_params[id].
- file_name != NULL)
- free(app->source_params[id].
- file_name);
- return -EINVAL;
- }
- }
-
- i++;
-
- if (i == p->n_pktq_in)
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int
-parse_pipeline_pcap_sink(struct app_params *app,
- struct app_pipeline_params *p,
- const char *file_name, const char *n_pkts_to_dump)
-{
- const char *next = NULL;
- char *end;
- uint32_t i;
- int parse_file = 0;
-
- if (file_name && !n_pkts_to_dump) {
- next = file_name;
- parse_file = 1; /* parse file path */
- } else if (n_pkts_to_dump && !file_name) {
- next = n_pkts_to_dump;
- parse_file = 0; /* parse copy size */
- } else
- return -EINVAL;
-
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
-
- if (p->n_pktq_out == 0)
- return -EINVAL;
-
- i = 0;
- while (*next != '\0') {
- uint32_t id;
-
- if (i >= p->n_pktq_out)
- return -EINVAL;
-
- id = p->pktq_out[i].id;
-
- end = strchr(next, ' ');
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
-
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
-
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
-
- if (parse_file) {
- app->sink_params[id].file_name = strdup(name);
- if (app->sink_params[id].file_name == NULL)
- return -ENOMEM;
- } else {
- if (parser_read_uint32(
- &app->sink_params[id].n_pkts_to_dump,
- name) != 0) {
- if (app->sink_params[id].file_name !=
- NULL)
- free(app->sink_params[id].
- file_name);
- return -EINVAL;
- }
- }
-
- i++;
-
- if (i == p->n_pktq_out)
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int
+static void
parse_pipeline_pktq_in(struct app_params *app,
struct app_pipeline_params *p,
- const char *value)
+ char *value)
{
- const char *next = value;
- char *end;
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
+ p->n_pktq_in = 0;
- while (*next != '\0') {
+ while (1) {
enum app_pktq_in_type type;
int id;
- char *end_space;
- char *end_tab;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
- next = skip_white_spaces(next);
- if (!next)
+ if (name == NULL)
break;
- end_space = strchr(next, ' ');
- end_tab = strchr(next, ' ');
-
- if (end_space && (!end_tab))
- end = end_space;
- else if ((!end_space) && end_tab)
- end = end_tab;
- else if (end_space && end_tab)
- end = RTE_MIN(end_space, end_tab);
- else
- end = NULL;
-
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
-
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
-
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_pktq_in < RTE_DIM(p->pktq_in)),
+ p->name, "pktq_in", (uint32_t)RTE_DIM(p->pktq_in));
if (validate_name(name, "RXQ", 2) == 0) {
type = APP_PKTQ_IN_HWQ;
id = APP_PARAM_ADD(app->hwq_in_params, name);
+ APP_PARAM_ADD_LINK_FOR_RXQ(app, name);
} else if (validate_name(name, "SWQ", 1) == 0) {
type = APP_PKTQ_IN_SWQ;
id = APP_PARAM_ADD(app->swq_params, name);
} else if (validate_name(name, "TM", 1) == 0) {
type = APP_PKTQ_IN_TM;
id = APP_PARAM_ADD(app->tm_params, name);
+ APP_PARAM_ADD_LINK_FOR_TM(app, name);
+ } else if (validate_name(name, "KNI", 1) == 0) {
+ type = APP_PKTQ_IN_KNI;
+ id = APP_PARAM_ADD(app->kni_params, name);
+ APP_PARAM_ADD_LINK_FOR_KNI(app, name);
} else if (validate_name(name, "SOURCE", 1) == 0) {
type = APP_PKTQ_IN_SOURCE;
id = APP_PARAM_ADD(app->source_params, name);
} else
- return -EINVAL;
-
- if (id < 0)
- return id;
+ PARSE_ERROR_INVALID_ELEMENT(0,
+ p->name, "pktq_in", name);
p->pktq_in[p->n_pktq_in].type = type;
p->pktq_in[p->n_pktq_in].id = (uint32_t) id;
p->n_pktq_in++;
}
- return 0;
+ PARSE_ERROR_NO_ELEMENTS((p->n_pktq_in > 0), p->name, "pktq_in");
}
-static int
+static void
parse_pipeline_pktq_out(struct app_params *app,
struct app_pipeline_params *p,
- const char *value)
+ char *value)
{
- const char *next = value;
- char *end;
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
+ p->n_pktq_out = 0;
- while (*next != '\0') {
+ while (1) {
enum app_pktq_out_type type;
int id;
- char *end_space;
- char *end_tab;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
- next = skip_white_spaces(next);
- if (!next)
+ if (name == NULL)
break;
- end_space = strchr(next, ' ');
- end_tab = strchr(next, ' ');
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_pktq_out < RTE_DIM(p->pktq_out)),
+ p->name, "pktq_out", (uint32_t)RTE_DIM(p->pktq_out));
- if (end_space && (!end_tab))
- end = end_space;
- else if ((!end_space) && end_tab)
- end = end_tab;
- else if (end_space && end_tab)
- end = RTE_MIN(end_space, end_tab);
- else
- end = NULL;
-
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
-
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
-
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
if (validate_name(name, "TXQ", 2) == 0) {
type = APP_PKTQ_OUT_HWQ;
id = APP_PARAM_ADD(app->hwq_out_params, name);
+ APP_PARAM_ADD_LINK_FOR_TXQ(app, name);
} else if (validate_name(name, "SWQ", 1) == 0) {
type = APP_PKTQ_OUT_SWQ;
id = APP_PARAM_ADD(app->swq_params, name);
} else if (validate_name(name, "TM", 1) == 0) {
type = APP_PKTQ_OUT_TM;
id = APP_PARAM_ADD(app->tm_params, name);
+ APP_PARAM_ADD_LINK_FOR_TM(app, name);
+ } else if (validate_name(name, "KNI", 1) == 0) {
+ type = APP_PKTQ_OUT_KNI;
+ id = APP_PARAM_ADD(app->kni_params, name);
+ APP_PARAM_ADD_LINK_FOR_KNI(app, name);
} else if (validate_name(name, "SINK", 1) == 0) {
type = APP_PKTQ_OUT_SINK;
id = APP_PARAM_ADD(app->sink_params, name);
} else
- return -EINVAL;
-
- if (id < 0)
- return id;
+ PARSE_ERROR_INVALID_ELEMENT(0,
+ p->name, "pktq_out", name);
p->pktq_out[p->n_pktq_out].type = type;
p->pktq_out[p->n_pktq_out].id = id;
p->n_pktq_out++;
}
- return 0;
+ PARSE_ERROR_NO_ELEMENTS((p->n_pktq_out > 0), p->name, "pktq_out");
}
-static int
+static void
parse_pipeline_msgq_in(struct app_params *app,
struct app_pipeline_params *p,
- const char *value)
+ char *value)
{
- const char *next = value;
- char *end;
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
- ssize_t idx;
-
- while (*next != '\0') {
- char *end_space;
- char *end_tab;
-
- next = skip_white_spaces(next);
- if (!next)
- break;
-
- end_space = strchr(next, ' ');
- end_tab = strchr(next, ' ');
+ p->n_msgq_in = 0;
- if (end_space && (!end_tab))
- end = end_space;
- else if ((!end_space) && end_tab)
- end = end_tab;
- else if (end_space && end_tab)
- end = RTE_MIN(end_space, end_tab);
- else
- end = NULL;
+ while (1) {
+ int idx;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
+ if (name == NULL)
+ break;
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_msgq_in < RTE_DIM(p->msgq_in)),
+ p->name, "msgq_in", (uint32_t)(RTE_DIM(p->msgq_in)));
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
-
- if (validate_name(name, "MSGQ", 1) != 0)
- return -EINVAL;
+ PARSE_ERROR_INVALID_ELEMENT(
+ (validate_name(name, "MSGQ", 1) == 0),
+ p->name, "msgq_in", name);
idx = APP_PARAM_ADD(app->msgq_params, name);
- if (idx < 0)
- return idx;
-
p->msgq_in[p->n_msgq_in] = idx;
p->n_msgq_in++;
}
- return 0;
+ PARSE_ERROR_NO_ELEMENTS((p->n_msgq_in > 0), p->name, "msgq_in");
}
-static int
+static void
parse_pipeline_msgq_out(struct app_params *app,
struct app_pipeline_params *p,
- const char *value)
+ char *value)
{
- const char *next = value;
- char *end;
- char name[APP_PARAM_NAME_SIZE];
- size_t name_len;
- ssize_t idx;
-
- while (*next != '\0') {
- char *end_space;
- char *end_tab;
-
- next = skip_white_spaces(next);
- if (!next)
- break;
-
- end_space = strchr(next, ' ');
- end_tab = strchr(next, ' ');
-
- if (end_space && (!end_tab))
- end = end_space;
- else if ((!end_space) && end_tab)
- end = end_tab;
- else if (end_space && end_tab)
- end = RTE_MIN(end_space, end_tab);
- else
- end = NULL;
+ p->n_msgq_out = 0;
- if (!end)
- name_len = strlen(next);
- else
- name_len = end - next;
+ while (1) {
+ int idx;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
- if (name_len == 0 || name_len == sizeof(name))
- return -EINVAL;
+ if (name == NULL)
+ break;
- strncpy(name, next, name_len);
- name[name_len] = '\0';
- next += name_len;
- if (*next != '\0')
- next++;
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_msgq_out < RTE_DIM(p->msgq_out)),
+ p->name, "msgq_out", (uint32_t)RTE_DIM(p->msgq_out));
- if (validate_name(name, "MSGQ", 1) != 0)
- return -EINVAL;
+ PARSE_ERROR_INVALID_ELEMENT(
+ (validate_name(name, "MSGQ", 1) == 0),
+ p->name, "msgq_out", name);
idx = APP_PARAM_ADD(app->msgq_params, name);
- if (idx < 0)
- return idx;
-
p->msgq_out[p->n_msgq_out] = idx;
p->n_msgq_out++;
}
- return 0;
+ PARSE_ERROR_NO_ELEMENTS((p->n_msgq_out > 0), p->name, "msgq_out");
}
static void
@@ -1392,9 +1000,8 @@ parse_pipeline(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->pipeline_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->pipeline_params, section_name);
-
param = &app->pipeline_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1421,38 +1028,26 @@ parse_pipeline(struct app_params *app,
}
if (strcmp(ent->name, "pktq_in") == 0) {
- int status = parse_pipeline_pktq_in(app, param,
- ent->value);
+ parse_pipeline_pktq_in(app, param, ent->value);
- PARSE_ERROR((status == 0), section_name,
- ent->name);
continue;
}
if (strcmp(ent->name, "pktq_out") == 0) {
- int status = parse_pipeline_pktq_out(app, param,
- ent->value);
+ parse_pipeline_pktq_out(app, param, ent->value);
- PARSE_ERROR((status == 0), section_name,
- ent->name);
continue;
}
if (strcmp(ent->name, "msgq_in") == 0) {
- int status = parse_pipeline_msgq_in(app, param,
- ent->value);
+ parse_pipeline_msgq_in(app, param, ent->value);
- PARSE_ERROR((status == 0), section_name,
- ent->name);
continue;
}
if (strcmp(ent->name, "msgq_out") == 0) {
- int status = parse_pipeline_msgq_out(app, param,
- ent->value);
+ parse_pipeline_msgq_out(app, param, ent->value);
- PARSE_ERROR((status == 0), section_name,
- ent->name);
continue;
}
@@ -1466,66 +1061,6 @@ parse_pipeline(struct app_params *app,
continue;
}
- if (strcmp(ent->name, "pcap_file_rd") == 0) {
- int status;
-
-#ifndef RTE_PORT_PCAP
- PARSE_ERROR_INVALID(0, section_name, ent->name);
-#endif
-
- status = parse_pipeline_pcap_source(app,
- param, ent->value, NULL);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pcap_bytes_rd_per_pkt") == 0) {
- int status;
-
-#ifndef RTE_PORT_PCAP
- PARSE_ERROR_INVALID(0, section_name, ent->name);
-#endif
-
- status = parse_pipeline_pcap_source(app,
- param, NULL, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pcap_file_wr") == 0) {
- int status;
-
-#ifndef RTE_PORT_PCAP
- PARSE_ERROR_INVALID(0, section_name, ent->name);
-#endif
-
- status = parse_pipeline_pcap_sink(app, param,
- ent->value, NULL);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
- int status;
-
-#ifndef RTE_PORT_PCAP
- PARSE_ERROR_INVALID(0, section_name, ent->name);
-#endif
-
- status = parse_pipeline_pcap_sink(app, param,
- NULL, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
/* pipeline type specific items */
APP_CHECK((param->n_args < APP_MAX_PIPELINE_ARGS),
"Parse error in section \"%s\": too many "
@@ -1541,17 +1076,13 @@ parse_pipeline(struct app_params *app,
param->n_args++;
}
- param->parsed = 1;
-
snprintf(name, sizeof(name), "MSGQ-REQ-%s", section_name);
param_idx = APP_PARAM_ADD(app->msgq_params, name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
param->msgq_in[param->n_msgq_in++] = param_idx;
snprintf(name, sizeof(name), "MSGQ-RSP-%s", section_name);
param_idx = APP_PARAM_ADD(app->msgq_params, name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
param->msgq_out[param->n_msgq_out++] = param_idx;
@@ -1560,7 +1091,6 @@ parse_pipeline(struct app_params *app,
param->core_id,
(param->hyper_th_id) ? "h" : "");
param_idx = APP_PARAM_ADD(app->msgq_params, name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
snprintf(name, sizeof(name), "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
@@ -1568,7 +1098,6 @@ parse_pipeline(struct app_params *app,
param->core_id,
(param->hyper_th_id) ? "h" : "");
param_idx = APP_PARAM_ADD(app->msgq_params, name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
free(entries);
@@ -1593,9 +1122,8 @@ parse_mempool(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->mempool_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->mempool_params, section_name);
-
param = &app->mempool_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1640,11 +1168,152 @@ parse_mempool(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
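+/*
+ * Parse a whitespace-separated queue list: e.g. the entry
+ * "rss_qs = 1 2 3" fills p->rss_qs[] = {1, 2, 3} and sets
+ * p->n_rss_qs = 3; PARSE_DELIMITER (defined earlier in this file) is
+ * assumed to cover the whitespace between tokens.
+ */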
+static int
+parse_link_rss_qs(struct app_link_params *p,
+ char *value)
+{
+ p->n_rss_qs = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (p->n_rss_qs == RTE_DIM(p->rss_qs))
+ return -ENOMEM;
+
+ if (parser_read_uint32(&p->rss_qs[p->n_rss_qs++], token))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
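+/*
+ * e.g. "rss_proto_ipv4 = IP TCP UDP" yields the mask
+ * ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP.
+ */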
+static int
+parse_link_rss_proto_ipv4(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "IP") == 0) {
+ mask |= ETH_RSS_IPV4;
+ continue;
+ }
+ if (strcmp(token, "FRAG") == 0) {
+ mask |= ETH_RSS_FRAG_IPV4;
+ continue;
+ }
+ if (strcmp(token, "TCP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_TCP;
+ continue;
+ }
+ if (strcmp(token, "UDP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_UDP;
+ continue;
+ }
+ if (strcmp(token, "SCTP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_SCTP;
+ continue;
+ }
+ if (strcmp(token, "OTHER") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_OTHER;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_ipv4 = mask;
+ return 0;
+}
+
+static int
+parse_link_rss_proto_ipv6(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "IP") == 0) {
+ mask |= ETH_RSS_IPV6;
+ continue;
+ }
+ if (strcmp(token, "FRAG") == 0) {
+ mask |= ETH_RSS_FRAG_IPV6;
+ continue;
+ }
+ if (strcmp(token, "TCP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_TCP;
+ continue;
+ }
+ if (strcmp(token, "UDP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_UDP;
+ continue;
+ }
+ if (strcmp(token, "SCTP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_SCTP;
+ continue;
+ }
+ if (strcmp(token, "OTHER") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_OTHER;
+ continue;
+ }
+ if (strcmp(token, "IP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_EX;
+ continue;
+ }
+ if (strcmp(token, "TCP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_TCP_EX;
+ continue;
+ }
+ if (strcmp(token, "UDP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_UDP_EX;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_ipv6 = mask;
+ return 0;
+}
+
+static int
+parse_link_rss_proto_l2(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "L2") == 0) {
+ mask |= ETH_RSS_L2_PAYLOAD;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_l2 = mask;
+ return 0;
+}
+
static void
parse_link(struct app_params *app,
const char *section_name,
@@ -1653,6 +1322,10 @@ parse_link(struct app_params *app,
struct app_link_params *param;
struct rte_cfgfile_entry *entries;
int n_entries, i;
+ int rss_qs_present = 0;
+ int rss_proto_ipv4_present = 0;
+ int rss_proto_ipv6_present = 0;
+ int rss_proto_l2_present = 0;
int pci_bdf_present = 0;
ssize_t param_idx;
@@ -1665,9 +1338,8 @@ parse_link(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->link_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->link_params, section_name);
-
param = &app->link_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1707,7 +1379,6 @@ parse_link(struct app_params *app,
continue;
}
-
if (strcmp(ent->name, "tcp_local_q") == 0) {
int status = parser_read_uint32(
&param->tcp_local_q, ent->value);
@@ -1735,6 +1406,44 @@ parse_link(struct app_params *app,
continue;
}
+ if (strcmp(ent->name, "rss_qs") == 0) {
+ int status = parse_link_rss_qs(param, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ rss_qs_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_ipv4") == 0) {
+ int status =
+ parse_link_rss_proto_ipv4(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_ipv4_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_ipv6") == 0) {
+ int status =
+ parse_link_rss_proto_ipv6(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_ipv6_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_l2") == 0) {
+ int status = parse_link_rss_proto_l2(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_l2_present = 1;
+ continue;
+ }
+
if (strcmp(ent->name, "pci_bdf") == 0) {
PARSE_ERROR_DUPLICATE((pci_bdf_present == 0),
section_name, ent->name);
@@ -1760,7 +1469,28 @@ parse_link(struct app_params *app,
"this entry is mandatory (port_mask is not "
"provided)");
- param->parsed = 1;
+ if (rss_proto_ipv4_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_ipv4",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_ipv6_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_ipv6",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_l2_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_l2",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_ipv4_present ||
+ rss_proto_ipv6_present ||
+ rss_proto_l2_present) {
+ if (rss_proto_ipv4_present == 0)
+ param->rss_proto_ipv4 = 0;
+ if (rss_proto_ipv6_present == 0)
+ param->rss_proto_ipv6 = 0;
+ if (rss_proto_l2_present == 0)
+ param->rss_proto_l2 = 0;
+ }
free(entries);
}
@@ -1784,9 +1514,10 @@ parse_rxq(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->hwq_in_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->hwq_in_params, section_name);
-
param = &app->hwq_in_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_RXQ(app, section_name);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1798,10 +1529,8 @@ parse_rxq(struct app_params *app,
PARSE_ERROR((status == 0), section_name,
ent->name);
- idx = APP_PARAM_ADD(app->mempool_params,
- ent->value);
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
- section_name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
param->mempool_id = idx;
continue;
}
@@ -1828,8 +1557,6 @@ parse_rxq(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -1852,9 +1579,10 @@ parse_txq(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->hwq_out_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->hwq_out_params, section_name);
-
param = &app->hwq_out_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_TXQ(app, section_name);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -1887,12 +1615,19 @@ parse_txq(struct app_params *app,
continue;
}
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
/* unrecognized */
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -1920,9 +1655,8 @@ parse_swq(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->swq_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->swq_params, section_name);
-
param = &app->swq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2050,11 +1784,9 @@ parse_swq(struct app_params *app,
PARSE_ERROR((status == 0), section_name,
ent->name);
- idx = APP_PARAM_ADD(app->mempool_params,
- ent->value);
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
- section_name);
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
param->mempool_direct_id = idx;
+
mempool_direct_present = 1;
continue;
}
@@ -2066,11 +1798,10 @@ parse_swq(struct app_params *app,
PARSE_ERROR((status == 0), section_name,
ent->name);
- idx = APP_PARAM_ADD(app->mempool_params,
- ent->value);
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
- section_name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
param->mempool_indirect_id = idx;
+
mempool_indirect_present = 1;
continue;
}
@@ -2079,32 +1810,30 @@ parse_swq(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- APP_CHECK(((mtu_present) &&
+ APP_CHECK(((mtu_present == 0) ||
((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
"is off, therefore entry \"mtu\" is not allowed",
section_name);
- APP_CHECK(((metadata_size_present) &&
+ APP_CHECK(((metadata_size_present == 0) ||
((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
"is off, therefore entry \"metadata_size\" is "
"not allowed", section_name);
- APP_CHECK(((mempool_direct_present) &&
+ APP_CHECK(((mempool_direct_present == 0) ||
((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
"is off, therefore entry \"mempool_direct\" is "
"not allowed", section_name);
- APP_CHECK(((mempool_indirect_present) &&
+ APP_CHECK(((mempool_indirect_present == 0) ||
((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
"Parse error in section \"%s\": IPv4/IPv6 fragmentation "
"is off, therefore entry \"mempool_indirect\" is "
"not allowed", section_name);
- param->parsed = 1;
-
free(entries);
}
@@ -2127,9 +1856,10 @@ parse_tm(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->tm_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->tm_params, section_name);
-
param = &app->tm_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_TM(app, section_name);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2162,7 +1892,101 @@ parse_tm(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
+ free(entries);
+}
+
+static void
+parse_kni(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_kni_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->kni_params, section_name);
+ param = &app->kni_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_KNI(app, section_name);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "core") == 0) {
+ int status = parse_pipeline_core(
+ &param->socket_id,
+ &param->core_id,
+ &param->hyper_th_id,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ param->force_bind = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(&param->burst_read,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(&param->burst_write,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
free(entries);
}
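
For reference, a minimal sketch of a KNI section that parse_kni() above would accept, assuming the section naming follows the other packet-queue types; the values are illustrative, and MEMPOOL0 is the implicit default pool created at start-up:

    [KNI0]
    core = s0c1
    mempool = MEMPOOL0
    burst_read = 32
    burst_write = 32
    dropless = no
    n_retries = 0
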
@@ -2188,9 +2012,8 @@ parse_source(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->source_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->source_params, section_name);
-
param = &app->source_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2202,10 +2025,8 @@ parse_source(struct app_params *app,
PARSE_ERROR((status == 0), section_name,
ent->name);
- idx = APP_PARAM_ADD(app->mempool_params,
- ent->value);
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
- section_name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
param->mempool_id = idx;
continue;
}
@@ -2219,7 +2040,7 @@ parse_source(struct app_params *app,
continue;
}
- if (strcmp(ent->name, "pcap_file_rd")) {
+ if (strcmp(ent->name, "pcap_file_rd") == 0) {
PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
section_name, ent->name);
@@ -2251,8 +2072,6 @@ parse_source(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -2277,14 +2096,13 @@ parse_sink(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->sink_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->sink_params, section_name);
-
param = &app->sink_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
- if (strcmp(ent->name, "pcap_file_wr")) {
+ if (strcmp(ent->name, "pcap_file_wr") == 0) {
PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
section_name, ent->name);
@@ -2295,7 +2113,7 @@ parse_sink(struct app_params *app,
continue;
}
- if (strcmp(ent->name, "pcap_n_pkt_wr")) {
+ if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
int status;
PARSE_ERROR_DUPLICATE((pcap_n_pkt_present == 0),
@@ -2314,8 +2132,6 @@ parse_sink(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -2338,9 +2154,8 @@ parse_msgq_req_pipeline(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
-
param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2358,7 +2173,6 @@ parse_msgq_req_pipeline(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
free(entries);
}
@@ -2381,9 +2195,8 @@ parse_msgq_rsp_pipeline(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
-
param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2401,8 +2214,6 @@ parse_msgq_rsp_pipeline(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -2425,9 +2236,8 @@ parse_msgq(struct app_params *app,
rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
-
param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
for (i = 0; i < n_entries; i++) {
struct rte_cfgfile_entry *ent = &entries[i];
@@ -2454,8 +2264,6 @@ parse_msgq(struct app_params *app,
PARSE_ERROR_INVALID(0, section_name, ent->name);
}
- param->parsed = 1;
-
free(entries);
}
@@ -2478,6 +2286,7 @@ static const struct config_section cfg_file_scheme[] = {
{"TXQ", 2, parse_txq},
{"SWQ", 1, parse_swq},
{"TM", 1, parse_tm},
+ {"KNI", 1, parse_kni},
{"SOURCE", 1, parse_source},
{"SINK", 1, parse_sink},
{"MSGQ-REQ-PIPELINE", 1, parse_msgq_req_pipeline},
@@ -2488,10 +2297,7 @@ static const struct config_section cfg_file_scheme[] = {
static void
create_implicit_mempools(struct app_params *app)
{
- ssize_t idx;
-
- idx = APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
- PARSER_PARAM_ADD_CHECK(idx, app->mempool_params, "start-up");
+ APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
}
static void
@@ -2510,7 +2316,6 @@ create_implicit_links_from_port_mask(struct app_params *app,
snprintf(name, sizeof(name), "LINK%" PRIu32, link_id);
idx = APP_PARAM_ADD(app->link_params, name);
- PARSER_PARAM_ADD_CHECK(idx, app->link_params, name);
app->link_params[idx].pmd_id = pmd_id;
link_id++;
@@ -2525,6 +2330,11 @@ assign_link_pmd_id_from_pci_bdf(struct app_params *app)
for (i = 0; i < app->n_links; i++) {
struct app_link_params *link = &app->link_params[i];
+ APP_CHECK((strlen(link->pci_bdf)),
+ "Parse error: %s pci_bdf is not configured "
+ "(port_mask is not provided)",
+ link->name);
+
link->pmd_id = i;
}
}
@@ -2615,28 +2425,12 @@ app_config_parse(struct app_params *app, const char *file_name)
APP_PARAM_COUNT(app->hwq_out_params, app->n_pktq_hwq_out);
APP_PARAM_COUNT(app->swq_params, app->n_pktq_swq);
APP_PARAM_COUNT(app->tm_params, app->n_pktq_tm);
+ APP_PARAM_COUNT(app->kni_params, app->n_pktq_kni);
APP_PARAM_COUNT(app->source_params, app->n_pktq_source);
APP_PARAM_COUNT(app->sink_params, app->n_pktq_sink);
APP_PARAM_COUNT(app->msgq_params, app->n_msgq);
APP_PARAM_COUNT(app->pipeline_params, app->n_pipelines);
-#ifdef RTE_PORT_PCAP
- for (i = 0; i < (int)app->n_pktq_source; i++) {
- struct app_pktq_source_params *p = &app->source_params[i];
-
- APP_CHECK((p->file_name), "Parse error: missing "
- "mandatory field \"pcap_file_rd\" for \"%s\"",
- p->name);
- }
-#else
- for (i = 0; i < (int)app->n_pktq_source; i++) {
- struct app_pktq_source_params *p = &app->source_params[i];
-
- APP_CHECK((!p->file_name), "Parse error: invalid field "
- "\"pcap_file_rd\" for \"%s\"", p->name);
- }
-#endif
-
if (app->port_mask == 0)
assign_link_pmd_id_from_pci_bdf(app);
@@ -2803,6 +2597,84 @@ save_links_params(struct app_params *app, FILE *f)
fprintf(f, "%s = %" PRIu32 "\n", "sctp_local_q",
p->sctp_local_q);
+ if (p->n_rss_qs) {
+ uint32_t j;
+
+ /* rss_qs */
+ fprintf(f, "rss_qs = ");
+ for (j = 0; j < p->n_rss_qs; j++)
+ fprintf(f, "%" PRIu32 " ", p->rss_qs[j]);
+ fputc('\n', f);
+
+ /* rss_proto_ipv4 */
+ if (p->rss_proto_ipv4) {
+ fprintf(f, "rss_proto_ipv4 = ");
+ if (p->rss_proto_ipv4 & ETH_RSS_IPV4)
+ fprintf(f, "IP ");
+ if (p->rss_proto_ipv4 & ETH_RSS_FRAG_IPV4)
+ fprintf(f, "FRAG ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_TCP)
+ fprintf(f, "TCP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_UDP)
+ fprintf(f, "UDP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_SCTP)
+ fprintf(f, "SCTP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+ fprintf(f, "OTHER ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
+
+ /* rss_proto_ipv6 */
+ if (p->rss_proto_ipv6) {
+ fprintf(f, "rss_proto_ipv6 = ");
+ if (p->rss_proto_ipv6 & ETH_RSS_IPV6)
+ fprintf(f, "IP ");
+ if (p->rss_proto_ipv6 & ETH_RSS_FRAG_IPV6)
+ fprintf(f, "FRAG ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_TCP)
+ fprintf(f, "TCP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_UDP)
+ fprintf(f, "UDP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+ fprintf(f, "SCTP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_OTHER)
+ fprintf(f, "OTHER ");
+ if (p->rss_proto_ipv6 & ETH_RSS_IPV6_EX)
+ fprintf(f, "IP_EX ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_IPV6_TCP_EX)
+ fprintf(f, "TCP_EX ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_IPV6_UDP_EX)
+ fprintf(f, "UDP_EX ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
+
+ /* rss_proto_l2 */
+ if (p->rss_proto_l2) {
+ fprintf(f, "rss_proto_l2 = ");
+ if (p->rss_proto_l2 & ETH_RSS_L2_PAYLOAD)
+ fprintf(f, "L2 ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_l2 = <NONE>\n");
+ } else {
+ fprintf(f, "; rss_qs = <NONE>\n");
+ fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
+ fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
+ fprintf(f, "; rss_proto_l2 = <NONE>\n");
+ }
+
if (strlen(p->pci_bdf))
fprintf(f, "%s = %s\n", "pci_bdf", p->pci_bdf);
@@ -2851,6 +2723,7 @@ save_txq_params(struct app_params *app, FILE *f)
fprintf(f, "%s = %s\n",
"dropless",
p->dropless ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
fputc('\n', f);
}
@@ -2916,6 +2789,53 @@ save_tm_params(struct app_params *app, FILE *f)
}
static void
+save_kni_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_kni_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->kni_params);
+ for (i = 0; i < count; i++) {
+ p = &app->kni_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ /* section name */
+ fprintf(f, "[%s]\n", p->name);
+
+ /* core */
+ if (p->force_bind) {
+ fprintf(f, "; force_bind = 1\n");
+ fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
+ p->socket_id,
+ p->core_id,
+ (p->hyper_th_id) ? "h" : "");
+ } else
+ fprintf(f, "; force_bind = 0\n");
+
+ /* mempool */
+ fprintf(f, "%s = %s\n", "mempool",
+ app->mempool_params[p->mempool_id].name);
+
+ /* burst_read */
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+
+ /* burst_write */
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+
+ /* dropless */
+ fprintf(f, "%s = %s\n",
+ "dropless",
+ p->dropless ? "yes" : "no");
+
+ /* n_retries */
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+
+ fputc('\n', f);
+ }
+}
+
+static void
save_source_params(struct app_params *app, FILE *f)
{
struct app_pktq_source_params *p;
@@ -3022,6 +2942,9 @@ save_pipeline_params(struct app_params *app, FILE *f)
case APP_PKTQ_IN_TM:
name = app->tm_params[pp->id].name;
break;
+ case APP_PKTQ_IN_KNI:
+ name = app->kni_params[pp->id].name;
+ break;
case APP_PKTQ_IN_SOURCE:
name = app->source_params[pp->id].name;
break;
@@ -3056,6 +2979,9 @@ save_pipeline_params(struct app_params *app, FILE *f)
case APP_PKTQ_OUT_TM:
name = app->tm_params[pp->id].name;
break;
+ case APP_PKTQ_OUT_KNI:
+ name = app->kni_params[pp->id].name;
+ break;
case APP_PKTQ_OUT_SINK:
name = app->sink_params[pp->id].name;
break;
@@ -3141,6 +3067,7 @@ app_config_save(struct app_params *app, const char *file_name)
save_txq_params(app, file);
save_swq_params(app, file);
save_tm_params(app, file);
+ save_kni_params(app, file);
save_source_params(app, file);
save_sink_params(app, file);
save_msgq_params(app, file);
@@ -3156,6 +3083,10 @@ app_config_init(struct app_params *app)
memcpy(app, &app_params_default, sizeof(struct app_params));
+ /* configure default_source_params */
+ default_source_params.file_name = strdup("./config/packets.pcap");
+ PARSE_ERROR_MALLOC(default_source_params.file_name != NULL);
+
for (i = 0; i < RTE_DIM(app->mempool_params); i++)
memcpy(&app->mempool_params[i],
&mempool_params_default,
@@ -3186,6 +3117,11 @@ app_config_init(struct app_params *app)
&default_tm_params,
sizeof(default_tm_params));
+ for (i = 0; i < RTE_DIM(app->kni_params); i++)
+ memcpy(&app->kni_params[i],
+ &default_kni_params,
+ sizeof(default_kni_params));
+
for (i = 0; i < RTE_DIM(app->source_params); i++)
memcpy(&app->source_params[i],
&default_source_params,
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index 83422e88..cd167f61 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -55,6 +55,8 @@
#define APP_NAME_SIZE 32
+#define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+
static void
app_init_core_map(struct app_params *app)
{
@@ -902,6 +904,67 @@ app_get_cpu_socket_id(uint32_t pmd_id)
return (status != SOCKET_ID_ANY) ? status : 0;
}
+static inline int
+app_link_rss_enabled(struct app_link_params *cp)
+{
+ return (cp->n_rss_qs) ? 1 : 0;
+}
+
+static void
+app_link_rss_setup(struct app_link_params *cp)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
+ uint32_t i;
+ int status;
+
+ /* Get RETA size */
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(cp->pmd_id, &dev_info);
+
+ if (dev_info.reta_size == 0)
+ rte_panic("%s (%u): RSS setup error (null RETA size)\n",
+ cp->name, cp->pmd_id);
+
+ if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
+ rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
+ cp->name, cp->pmd_id);
+
+ /* Setup RETA contents */
+ memset(reta_conf, 0, sizeof(reta_conf));
+
+ for (i = 0; i < dev_info.reta_size; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < dev_info.reta_size; i++) {
+ uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
+ uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+ uint32_t rss_qs_pos = i % cp->n_rss_qs;
+
+ reta_conf[reta_id].reta[reta_pos] =
+ (uint16_t) cp->rss_qs[rss_qs_pos];
+ }
+
+ /* RETA update */
+ status = rte_eth_dev_rss_reta_update(cp->pmd_id,
+ reta_conf,
+ dev_info.reta_size);
+ if (status != 0)
+ rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
+ cp->name, cp->pmd_id);
+}
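
As a worked example of the fill loop above: with dev_info.reta_size = 128 and a link configured with n_rss_qs = 4, rss_qs = {0, 1, 2, 3}, every RETA entry i is assigned queue rss_qs[i % 4], so the queue list simply repeats end to end:

    RETA entry: 0 1 2 3 4 5 6 7 ...
    RX queue:   0 1 2 3 0 1 2 3 ...
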
+
+static void
+app_init_link_set_config(struct app_link_params *p)
+{
+ if (p->n_rss_qs) {
+ p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
+ p->rss_proto_ipv6 |
+ p->rss_proto_l2;
+ }
+}
+
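
A LINK section enabling this RSS path could look like the sketch below, assuming the LINK parser mirrors the format that save_links_params() writes out; the queue ids and protocol selection are illustrative:

    [LINK0]
    rss_qs = 0 1 2 3
    rss_proto_ipv4 = TCP UDP
    rss_proto_ipv6 = TCP UDP
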
static void
app_init_link(struct app_params *app)
{
@@ -917,6 +980,7 @@ app_init_link(struct app_params *app)
sscanf(p_link->name, "LINK%" PRIu32, &link_id);
n_hwq_in = app_link_get_n_rxq(app, p_link);
n_hwq_out = app_link_get_n_txq(app, p_link);
+ app_init_link_set_config(p_link);
APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
"(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
@@ -1001,9 +1065,13 @@ app_init_link(struct app_params *app)
rte_panic("Cannot start %s (error %" PRId32 ")\n",
p_link->name, status);
- /* LINK UP */
+ /* LINK FILTERS */
app_link_set_arp_filter(app, p_link);
app_link_set_tcp_syn_filter(app, p_link);
+ if (app_link_rss_enabled(p_link))
+ app_link_rss_setup(p_link);
+
+ /* LINK UP */
app_link_up_internal(app, p_link);
}
@@ -1108,6 +1176,111 @@ app_init_tm(struct app_params *app)
}
}
+#ifdef RTE_LIBRTE_KNI
+static int
+kni_config_network_interface(uint8_t port_id, uint8_t if_up)
+{
+ int ret = 0;
+
+ if (port_id >= rte_eth_dev_count())
+ return -EINVAL;
+
+ ret = (if_up) ?
+ rte_eth_dev_set_link_up(port_id) :
+ rte_eth_dev_set_link_down(port_id);
+
+ return ret;
+}
+
+static int
+kni_change_mtu(uint8_t port_id, unsigned new_mtu)
+{
+ int ret;
+
+ if (port_id >= rte_eth_dev_count())
+ return -EINVAL;
+
+ if (new_mtu > ETHER_MAX_LEN)
+ return -EINVAL;
+
+ /* Set new MTU */
+ ret = rte_eth_dev_set_mtu(port_id, new_mtu);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#endif /* RTE_LIBRTE_KNI */
+
+#ifndef RTE_LIBRTE_KNI
+static void
+app_init_kni(struct app_params *app)
+{
+ if (app->n_pktq_kni == 0)
+ return;
+
+ rte_panic("Can not init KNI without librte_kni support.\n");
+}
+#else
+static void
+app_init_kni(struct app_params *app)
+{
+ uint32_t i;
+
+ if (app->n_pktq_kni == 0)
+ return;
+
+ rte_kni_init(app->n_pktq_kni);
+
+ for (i = 0; i < app->n_pktq_kni; i++) {
+ struct app_pktq_kni_params *p_kni = &app->kni_params[i];
+ struct app_link_params *p_link;
+ struct rte_eth_dev_info dev_info;
+ struct app_mempool_params *mempool_params;
+ struct rte_mempool *mempool;
+ struct rte_kni_conf conf;
+ struct rte_kni_ops ops;
+
+ /* LINK */
+ p_link = app_get_link_for_kni(app, p_kni);
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
+
+ /* MEMPOOL */
+ mempool_params = &app->mempool_params[p_kni->mempool_id];
+ mempool = app->mempool[p_kni->mempool_id];
+
+ /* KNI */
+ memset(&conf, 0, sizeof(conf));
+ snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
+ conf.force_bind = p_kni->force_bind;
+ if (conf.force_bind) {
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ p_kni->socket_id,
+ p_kni->core_id,
+ p_kni->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("%s invalid CPU core\n", p_kni->name);
+
+ conf.core_id = (uint32_t) lcore_id;
+ }
+ conf.group_id = p_link->pmd_id;
+ conf.mbuf_size = mempool_params->buffer_size;
+ conf.addr = dev_info.pci_dev->addr;
+ conf.id = dev_info.pci_dev->id;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.port_id = (uint8_t) p_link->pmd_id;
+ ops.change_mtu = kni_change_mtu;
+ ops.config_network_if = kni_config_network_interface;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
+ app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
+ if (!app->kni[i])
+ rte_panic("%s init error\n", p_kni->name);
+ }
+}
+#endif /* RTE_LIBRTE_KNI */
+
static void
app_init_msgq(struct app_params *app)
{
@@ -1128,15 +1301,16 @@ app_init_msgq(struct app_params *app)
}
}
-static void app_pipeline_params_get(struct app_params *app,
+void app_pipeline_params_get(struct app_params *app,
struct app_pipeline_params *p_in,
struct pipeline_params *p_out)
{
uint32_t i;
- uint32_t mempool_id;
snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
+ snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
+
p_out->socket_id = (int) p_in->socket_id;
p_out->log_level = app->log_level;
@@ -1212,34 +1386,35 @@ static void app_pipeline_params_get(struct app_params *app,
break;
}
case APP_PKTQ_IN_TM:
+ {
out->type = PIPELINE_PORT_IN_SCHED_READER;
out->params.sched.sched = app->tm[in->id];
out->burst_size = app->tm_params[in->id].burst_read;
break;
+ }
+#ifdef RTE_LIBRTE_KNI
+ case APP_PKTQ_IN_KNI:
+ {
+ out->type = PIPELINE_PORT_IN_KNI_READER;
+ out->params.kni.kni = app->kni[in->id];
+ out->burst_size = app->kni_params[in->id].burst_read;
+ break;
+ }
+#endif /* RTE_LIBRTE_KNI */
case APP_PKTQ_IN_SOURCE:
- mempool_id = app->source_params[in->id].mempool_id;
+ {
+ uint32_t mempool_id =
+ app->source_params[in->id].mempool_id;
+
out->type = PIPELINE_PORT_IN_SOURCE;
out->params.source.mempool = app->mempool[mempool_id];
out->burst_size = app->source_params[in->id].burst;
-
-#ifdef RTE_NEXT_ABI
- if (app->source_params[in->id].file_name
- != NULL) {
- out->params.source.file_name = strdup(
- app->source_params[in->id].
- file_name);
- if (out->params.source.file_name == NULL) {
- out->params.source.
- n_bytes_per_pkt = 0;
- break;
- }
- out->params.source.n_bytes_per_pkt =
- app->source_params[in->id].
- n_bytes_per_pkt;
- }
-#endif
-
+ out->params.source.file_name =
+ app->source_params[in->id].file_name;
+ out->params.source.n_bytes_per_pkt =
+ app->source_params[in->id].n_bytes_per_pkt;
break;
+ }
default:
break;
}
@@ -1350,7 +1525,8 @@ static void app_pipeline_params_get(struct app_params *app,
}
break;
}
- case APP_PKTQ_OUT_TM: {
+ case APP_PKTQ_OUT_TM:
+ {
struct rte_port_sched_writer_params *params =
&out->params.sched;
@@ -1360,24 +1536,45 @@ static void app_pipeline_params_get(struct app_params *app,
app->tm_params[in->id].burst_write;
break;
}
- case APP_PKTQ_OUT_SINK:
- out->type = PIPELINE_PORT_OUT_SINK;
- if (app->sink_params[in->id].file_name != NULL) {
- out->params.sink.file_name = strdup(
- app->sink_params[in->id].
- file_name);
- if (out->params.sink.file_name == NULL) {
- out->params.sink.max_n_pkts = 0;
- break;
- }
- out->params.sink.max_n_pkts =
- app->sink_params[in->id].
- n_pkts_to_dump;
+#ifdef RTE_LIBRTE_KNI
+ case APP_PKTQ_OUT_KNI:
+ {
+ struct app_pktq_kni_params *p_kni =
+ &app->kni_params[in->id];
+
+ if (p_kni->dropless == 0) {
+ struct rte_port_kni_writer_params *params =
+ &out->params.kni;
+
+ out->type = PIPELINE_PORT_OUT_KNI_WRITER;
+ params->kni = app->kni[in->id];
+ params->tx_burst_sz =
+ app->kni_params[in->id].burst_write;
} else {
- out->params.sink.file_name = NULL;
- out->params.sink.max_n_pkts = 0;
+ struct rte_port_kni_writer_nodrop_params
+ *params = &out->params.kni_nodrop;
+
+ out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
+ params->kni = app->kni[in->id];
+ params->tx_burst_sz =
+ app->kni_params[in->id].burst_write;
+ params->n_retries =
+ app->kni_params[in->id].n_retries;
}
break;
+ }
+#endif /* RTE_LIBRTE_KNI */
+ case APP_PKTQ_OUT_SINK:
+ {
+ out->type = PIPELINE_PORT_OUT_SINK;
+ out->params.sink.file_name =
+ app->sink_params[in->id].file_name;
+ out->params.sink.max_n_pkts =
+ app->sink_params[in->id].
+ n_pkts_to_dump;
+
+ break;
+ }
default:
break;
}
@@ -1449,6 +1646,27 @@ app_init_pipelines(struct app_params *app)
}
static void
+app_post_init_pipelines(struct app_params *app)
+{
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ int status;
+
+ if (data->ptype->fe_ops->f_post_init == NULL)
+ continue;
+
+ status = data->ptype->fe_ops->f_post_init(data->fe);
+ if (status)
+ rte_panic("Pipeline instance \"%s\" front-end "
+ "post-init error\n", params->name);
+ }
+}
+
+static void
app_init_threads(struct app_params *app)
{
uint64_t time = rte_get_tsc_cycles();
@@ -1534,6 +1752,7 @@ int app_init(struct app_params *app)
app_init_link(app);
app_init_swq(app);
app_init_tm(app);
+ app_init_kni(app);
app_init_msgq(app);
app_pipeline_common_cmd_push(app);
@@ -1551,6 +1770,13 @@ int app_init(struct app_params *app)
return 0;
}
+int app_post_init(struct app_params *app)
+{
+ app_post_init_pipelines(app);
+
+ return 0;
+}
+
static int
app_pipeline_type_cmd_push(struct app_params *app,
struct pipeline_type *ptype)
diff --git a/examples/ip_pipeline/parser.c b/examples/ip_pipeline/parser.c
new file mode 100644
index 00000000..689e2065
--- /dev/null
+++ b/examples/ip_pipeline/parser.c
@@ -0,0 +1,745 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For my_ether_aton() function:
+ *
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For inet_pton4() and inet_pton6() functions:
+ *
+ * Copyright (c) 1996 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
+ * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
+ * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+parser_read_arg_bool(const char *p)
+{
+ int result = -EINVAL;
+
+ p = skip_white_spaces(p);
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
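
A quick usage sketch; note that only the all-lower-case or all-upper-case spellings are matched, so e.g. "Yes" is rejected:

    int v;

    v = parser_read_arg_bool("yes");   /* v == 1 */
    v = parser_read_arg_bool("OFF");   /* v == 0 */
    v = parser_read_arg_bool("maybe"); /* v == -EINVAL */
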
+
+int
+parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoull(p, &next, 10); /* strtoull keeps the full 64-bit range on 32-bit builds */
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
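
Because the switch falls through, each suffix letter multiplies by one more factor of 1024 ('K' by 1024, 'M' by 1024^2, 'G' by 1024^3, 'T' by 1024^4); a usage sketch:

    uint64_t v;

    parser_read_uint64(&v, "16");  /* v == 16 */
    parser_read_uint64(&v, "16K"); /* v == 16384 */
    parser_read_uint64(&v, "2M");  /* v == 2097152 */
    parser_read_uint64(&v, "1G");  /* v == 1073741824 */
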
+
+int
+parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoull(p, &next, 16); /* strtoull keeps the full 64-bit range on 32-bit builds */
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if ((string == NULL) ||
+ (tokens == NULL) ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if ((i == *n_tokens) &&
+ (NULL != strtok_r(string, PARSE_DELIMITER, &string)))
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
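
Usage sketch: *n_tokens is the array capacity on input and the token count on output, and the call fails with -E2BIG if the string holds more tokens than the array can take:

    char cmd[] = "link 0 up";
    char *tokens[16];
    uint32_t n_tokens = RTE_DIM(tokens);

    if (parse_tokenize_string(cmd, tokens, &n_tokens) == 0) {
        /* n_tokens == 3: "link", "0", "up" */
    }
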
+
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
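
Note the (len & 3) test: the input must contain a multiple of four hex digits. A minimal sketch:

    char hex[] = "DEADBEEF";
    uint8_t key[16];
    uint32_t size = sizeof(key); /* in: capacity, out: bytes written */

    if (parse_hex_string(hex, key, &size) == 0) {
        /* size == 4, key[0..3] == 0xDE 0xAD 0xBE 0xEF */
    }
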
+
+int
+parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+{
+ uint32_t n_max_labels = *n_labels, count = 0;
+
+ /* Check for void list of labels */
+ if (strcmp(string, "<void>") == 0) {
+ *n_labels = 0;
+ return 0;
+ }
+
+ /* At least one label should be present */
+ for ( ; (*string != '\0'); ) {
+ char *next;
+ int value;
+
+ if (count >= n_max_labels)
+ return -1;
+
+ if (count > 0) {
+ if (string[0] != ':')
+ return -1;
+
+ string++;
+ }
+
+ value = strtol(string, &next, 10);
+ if (next == string)
+ return -1;
+ string = next;
+
+ labels[count++] = (uint32_t) value;
+ }
+
+ *n_labels = count;
+ return 0;
+}
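
Labels are colon-separated and the literal "<void>" denotes an empty label stack; for example:

    char s[] = "16:32:64";
    uint32_t labels[4], n_labels = RTE_DIM(labels);

    parse_mpls_labels(s, labels, &n_labels);
    /* n_labels == 3, labels == { 16, 32, 64 } */
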
+
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char) ((val >> 8) & 0xff);
+ *tp++ = (unsigned char) (val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char) ((val >> 8) & 0xff);
+ *tp++ = (unsigned char) (val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+static struct ether_addr *
+my_ether_aton(const char *a)
+{
+ int i;
+ char *end;
+ unsigned long o[ETHER_ADDR_LEN];
+ static struct ether_addr ether_addr;
+
+ i = 0;
+ do {
+ errno = 0;
+ o[i] = strtoul(a, &end, 16);
+ if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+ return NULL;
+ a = end + 1;
+ } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0);
+
+ /* Junk at the end of line */
+ if (end[0] != 0)
+ return NULL;
+
+ /* Support the format XX:XX:XX:XX:XX:XX */
+ if (i == ETHER_ADDR_LEN) {
+ while (i-- != 0) {
+ if (o[i] > UINT8_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i] = (uint8_t)o[i];
+ }
+ /* Support the format XXXX:XXXX:XXXX */
+ } else if (i == ETHER_ADDR_LEN / 2) {
+ while (i-- != 0) {
+ if (o[i] > UINT16_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8);
+ ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
+ }
+ /* unknown format */
+ } else
+ return NULL;
+
+ return (struct ether_addr *)&ether_addr;
+}
+
+int
+parse_ipv4_addr(const char *token, struct in_addr *ipv4)
+{
+ if (strlen(token) >= INET_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton4(token, (unsigned char *)ipv4) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+parse_ipv6_addr(const char *token, struct in6_addr *ipv6)
+{
+ if (strlen(token) >= INET6_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton6(token, (unsigned char *)ipv6) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+parse_mac_addr(const char *token, struct ether_addr *addr)
+{
+ struct ether_addr *tmp;
+
+ tmp = my_ether_aton(token);
+ if (tmp == NULL)
+ return -1;
+
+ memcpy(addr, tmp, sizeof(struct ether_addr));
+ return 0;
+}
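
Since my_ether_aton() accepts both colon layouts, the two calls below parse to the same address (a sketch):

    struct ether_addr mac;

    parse_mac_addr("01:23:45:67:89:ab", &mac);
    parse_mac_addr("0123:4567:89ab", &mac); /* same result */
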
+
+int
+parse_pipeline_core(uint32_t *socket,
+ uint32_t *core,
+ uint32_t *ht,
+ const char *entry)
+{
+ size_t num_len;
+ char num[8];
+
+ uint32_t s = 0, c = 0, h = 0, val;
+ uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
+ const char *next = skip_white_spaces(entry);
+ char type;
+
+ /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
+ while (*next != '\0') {
+ /* If everything is parsed, nothing should be left */
+ if (s_parsed && c_parsed && h_parsed)
+ return -EINVAL;
+
+ type = *next;
+ switch (type) {
+ case 's':
+ case 'S':
+ if (s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+ s_parsed = 1;
+ next++;
+ break;
+ case 'c':
+ case 'C':
+ if (c_parsed || h_parsed)
+ return -EINVAL;
+ c_parsed = 1;
+ next++;
+ break;
+ case 'h':
+ case 'H':
+ if (h_parsed)
+ return -EINVAL;
+ h_parsed = 1;
+ next++;
+ break;
+ default:
+ /* If it starts with a digit, it must be just the core id. */
+ if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+
+ type = 'C';
+ }
+
+ for (num_len = 0; *next != '\0'; next++, num_len++) {
+ if (!isdigit(*next))
+ break;
+
+ /* leave room for the '\0' terminator appended below */
+ if (num_len == RTE_DIM(num) - 1)
+ return -EINVAL;
+
+ num[num_len] = *next;
+ }
+
+ if (num_len == 0 && type != 'h' && type != 'H')
+ return -EINVAL;
+
+ if (num_len != 0 && (type == 'h' || type == 'H'))
+ return -EINVAL;
+
+ num[num_len] = '\0';
+ val = strtol(num, NULL, 10);
+
+ h = 0;
+ switch (type) {
+ case 's':
+ case 'S':
+ s = val;
+ break;
+ case 'c':
+ case 'C':
+ c = val;
+ break;
+ case 'h':
+ case 'H':
+ h = 1;
+ break;
+ }
+ }
+
+ *socket = s;
+ *core = c;
+ *ht = h;
+ return 0;
+}
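
Examples of accepted core strings (a bare number is taken as the core id, and 'h' carries no digits):

    uint32_t s, c, h;

    parse_pipeline_core(&s, &c, &h, "3");     /* s == 0, c == 3, h == 0 */
    parse_pipeline_core(&s, &c, &h, "s1c2");  /* s == 1, c == 2, h == 0 */
    parse_pipeline_core(&s, &c, &h, "s0c4h"); /* s == 0, c == 4, h == 1 */
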
diff --git a/examples/ip_pipeline/parser.h b/examples/ip_pipeline/parser.h
index 58b59daf..9bd36af3 100644
--- a/examples/ip_pipeline/parser.h
+++ b/examples/ip_pipeline/parser.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,17 +34,51 @@
#ifndef __INCLUDE_PARSER_H__
#define __INCLUDE_PARSER_H__
-int
-parser_read_arg_bool(const char *p);
+#include <stdint.h>
-int
-parser_read_uint64(uint64_t *value, const char *p);
+#include <rte_ip.h>
+#include <rte_ether.h>
-int
-parser_read_uint32(uint32_t *value, const char *p);
+#define PARSE_DELIMITER " \f\n\r\t\v"
-int
-parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
-#endif
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int parser_read_arg_bool(const char *p);
+
+int parser_read_uint64(uint64_t *value, const char *p);
+int parser_read_uint32(uint32_t *value, const char *p);
+int parser_read_uint16(uint16_t *value, const char *p);
+int parser_read_uint8(uint8_t *value, const char *p);
+int parser_read_uint64_hex(uint64_t *value, const char *p);
+int parser_read_uint32_hex(uint32_t *value, const char *p);
+int parser_read_uint16_hex(uint16_t *value, const char *p);
+int parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int parse_ipv4_addr(const char *token, struct in_addr *ipv4);
+int parse_ipv6_addr(const char *token, struct in6_addr *ipv6);
+int parse_mac_addr(const char *token, struct ether_addr *addr);
+int parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels);
+
+int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
+
+#endif
diff --git a/examples/ip_pipeline/pipeline.h b/examples/ip_pipeline/pipeline.h
index dab9c36d..14a551db 100644
--- a/examples/ip_pipeline/pipeline.h
+++ b/examples/ip_pipeline/pipeline.h
@@ -42,13 +42,22 @@
* Pipeline type front-end operations
*/
-typedef void* (*pipeline_fe_op_init)(struct pipeline_params *params, void *arg);
+typedef void* (*pipeline_fe_op_init)(struct pipeline_params *params,
+ void *arg);
+
+typedef int (*pipeline_fe_op_post_init)(void *pipeline);
typedef int (*pipeline_fe_op_free)(void *pipeline);
+typedef int (*pipeline_fe_op_track)(struct pipeline_params *params,
+ uint32_t port_in,
+ uint32_t *port_out);
+
struct pipeline_fe_ops {
pipeline_fe_op_init f_init;
+ pipeline_fe_op_post_init f_post_init;
pipeline_fe_op_free f_free;
+ pipeline_fe_op_track f_track;
cmdline_parse_ctx_t *cmds;
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.c b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
index a691d422..cd1d082a 100644
--- a/examples/ip_pipeline/pipeline/pipeline_common_fe.c
+++ b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,146 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
-#include <cmdline_socket.h>
#include <cmdline.h>
#include "pipeline_common_fe.h"
+#include "parser.h"
+
+struct app_link_params *
+app_pipeline_track_pktq_out_to_link(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t pktq_out_id)
+{
+ struct app_pipeline_params *p;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return NULL;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return NULL;
+
+ for ( ; ; ) {
+ struct app_pktq_out_params *pktq_out =
+ &p->pktq_out[pktq_out_id];
+
+ switch (pktq_out->type) {
+ case APP_PKTQ_OUT_HWQ:
+ {
+ struct app_pktq_hwq_out_params *hwq_out;
+
+ hwq_out = &app->hwq_out_params[pktq_out->id];
+
+ return app_get_link_for_txq(app, hwq_out);
+ }
+
+ case APP_PKTQ_OUT_SWQ:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_swq_params *swq;
+ uint32_t pktq_in_id;
+ int status;
+
+ swq = &app->swq_params[pktq_out->id];
+ p = app_swq_get_reader(app, swq, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_TM:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_tm_params *tm;
+ uint32_t pktq_in_id;
+ int status;
+
+ tm = &app->tm_params[pktq_out->id];
+ p = app_tm_get_reader(app, tm, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_KNI:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_kni_params *kni;
+ uint32_t pktq_in_id;
+ int status;
+
+ kni = &app->kni_params[pktq_out->id];
+ p = app_kni_get_reader(app, kni, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_SINK:
+ default:
+ return NULL;
+ }
+ }
+}
+
+int
+app_pipeline_track_default(struct pipeline_params *p,
+ uint32_t port_in,
+ uint32_t *port_out)
+{
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_out == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
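
A pipeline type with a single output port can plug this default straight into its front-end ops; a sketch over the struct from pipeline.h, with hypothetical handler names:

    static struct pipeline_fe_ops my_pipeline_fe_ops = {
        .f_init = my_pipeline_fe_init, /* hypothetical */
        .f_post_init = NULL,
        .f_free = my_pipeline_fe_free, /* hypothetical */
        .f_track = app_pipeline_track_default,
        .cmds = NULL,
    };
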
int
app_pipeline_ping(struct app_params *app,
@@ -312,6 +446,40 @@ app_pipeline_port_in_disable(struct app_params *app,
}
int
+app_link_set_op(struct app_params *app,
+ uint32_t link_id,
+ uint32_t pipeline_id,
+ app_link_op op,
+ void *arg)
+{
+ struct app_pipeline_params *pp;
+ struct app_link_params *lp;
+ struct app_link_data *ld;
+ uint32_t ppos, lpos;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (op == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, lp);
+ if (lp == NULL)
+ return -1;
+ lpos = lp - app->link_params;
+ ld = &app->link_data[lpos];
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, pp);
+ if (pp == NULL)
+ return -1;
+ ppos = pp - app->pipeline_params;
+
+ ld->f_link[ppos] = op;
+ ld->arg[ppos] = arg;
+
+ return 0;
+}
+
+int
app_link_config(struct app_params *app,
uint32_t link_id,
uint32_t ip,
@@ -382,6 +550,8 @@ app_link_up(struct app_params *app,
uint32_t link_id)
{
struct app_link_params *p;
+ struct app_link_data *d;
+ int i;
/* Check input arguments */
if (app == NULL)
@@ -394,6 +564,8 @@ app_link_up(struct app_params *app,
return -1;
}
+ d = &app->link_data[p - app->link_params];
+
/* Check link state */
if (p->state) {
APP_LOG(app, HIGH, "%s is already UP", p->name);
@@ -408,6 +580,11 @@ app_link_up(struct app_params *app,
app_link_up_internal(app, p);
+ /* Callbacks */
+ for (i = 0; i < APP_MAX_PIPELINES; i++)
+ if (d->f_link[i])
+ d->f_link[i](app, link_id, 1, d->arg[i]);
+
return 0;
}
@@ -416,6 +593,8 @@ app_link_down(struct app_params *app,
uint32_t link_id)
{
struct app_link_params *p;
+ struct app_link_data *d;
+ uint32_t i;
/* Check input arguments */
if (app == NULL)
@@ -428,6 +607,8 @@ app_link_down(struct app_params *app,
return -1;
}
+ d = &app->link_data[p - app->link_params];
+
/* Check link state */
if (p->state == 0) {
APP_LOG(app, HIGH, "%s is already DOWN", p->name);
@@ -436,6 +617,11 @@ app_link_down(struct app_params *app,
app_link_down_internal(app, p);
+ /* Callbacks */
+ for (i = 0; i < APP_MAX_PIPELINES; i++)
+ if (d->f_link[i])
+ d->f_link[i](app, link_id, 0, d->arg[i]);
+
return 0;
}
@@ -464,16 +650,16 @@ cmd_ping_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_ping_p_string =
+static cmdline_parse_token_string_t cmd_ping_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_ping_result, p_string, "p");
-cmdline_parse_token_num_t cmd_ping_pipeline_id =
+static cmdline_parse_token_num_t cmd_ping_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_ping_result, pipeline_id, UINT32);
-cmdline_parse_token_string_t cmd_ping_ping_string =
+static cmdline_parse_token_string_t cmd_ping_ping_string =
TOKEN_STRING_INITIALIZER(struct cmd_ping_result, ping_string, "ping");
-cmdline_parse_inst_t cmd_ping = {
+static cmdline_parse_inst_t cmd_ping = {
.f = cmd_ping_parsed,
.data = NULL,
.help_str = "Pipeline ping",
@@ -498,6 +684,7 @@ struct cmd_stats_port_in_result {
uint32_t port_in_id;
};
+
static void
cmd_stats_port_in_parsed(
void *parsed_result,
@@ -531,23 +718,23 @@ cmd_stats_port_in_parsed(
stats.stats.n_pkts_drop);
}
-cmdline_parse_token_string_t cmd_stats_port_in_p_string =
+static cmdline_parse_token_string_t cmd_stats_port_in_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_stats_port_in_pipeline_id =
+static cmdline_parse_token_num_t cmd_stats_port_in_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_stats_port_in_stats_string =
+static cmdline_parse_token_string_t cmd_stats_port_in_stats_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, stats_string,
"stats");
-cmdline_parse_token_string_t cmd_stats_port_in_port_string =
+static cmdline_parse_token_string_t cmd_stats_port_in_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, port_string,
"port");
-cmdline_parse_token_string_t cmd_stats_port_in_in_string =
+static cmdline_parse_token_string_t cmd_stats_port_in_in_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, in_string,
"in");
@@ -555,7 +742,7 @@ cmdline_parse_token_string_t cmd_stats_port_in_in_string =
TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, port_in_id,
UINT32);
-cmdline_parse_inst_t cmd_stats_port_in = {
+static cmdline_parse_inst_t cmd_stats_port_in = {
.f = cmd_stats_port_in_parsed,
.data = NULL,
.help_str = "Pipeline input port stats",
@@ -617,31 +804,31 @@ cmd_stats_port_out_parsed(
stats.stats.n_pkts_drop);
}
-cmdline_parse_token_string_t cmd_stats_port_out_p_string =
+static cmdline_parse_token_string_t cmd_stats_port_out_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_stats_port_out_pipeline_id =
+static cmdline_parse_token_num_t cmd_stats_port_out_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_stats_port_out_stats_string =
+static cmdline_parse_token_string_t cmd_stats_port_out_stats_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, stats_string,
"stats");
-cmdline_parse_token_string_t cmd_stats_port_out_port_string =
+static cmdline_parse_token_string_t cmd_stats_port_out_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, port_string,
"port");
-cmdline_parse_token_string_t cmd_stats_port_out_out_string =
+static cmdline_parse_token_string_t cmd_stats_port_out_out_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, out_string,
"out");
-cmdline_parse_token_num_t cmd_stats_port_out_port_out_id =
+static cmdline_parse_token_num_t cmd_stats_port_out_port_out_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, port_out_id,
UINT32);
-cmdline_parse_inst_t cmd_stats_port_out = {
+static cmdline_parse_inst_t cmd_stats_port_out = {
.f = cmd_stats_port_out_parsed,
.data = NULL,
.help_str = "Pipeline output port stats",
@@ -707,26 +894,26 @@ cmd_stats_table_parsed(
stats.n_pkts_dropped_lkp_miss);
}
-cmdline_parse_token_string_t cmd_stats_table_p_string =
+static cmdline_parse_token_string_t cmd_stats_table_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_stats_table_pipeline_id =
+static cmdline_parse_token_num_t cmd_stats_table_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_stats_table_stats_string =
+static cmdline_parse_token_string_t cmd_stats_table_stats_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, stats_string,
"stats");
-cmdline_parse_token_string_t cmd_stats_table_table_string =
+static cmdline_parse_token_string_t cmd_stats_table_table_string =
TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, table_string,
"table");
-cmdline_parse_token_num_t cmd_stats_table_table_id =
+static cmdline_parse_token_num_t cmd_stats_table_table_id =
TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, table_id, UINT32);
-cmdline_parse_inst_t cmd_stats_table = {
+static cmdline_parse_inst_t cmd_stats_table = {
.f = cmd_stats_table_parsed,
.data = NULL,
.help_str = "Pipeline table stats",
@@ -771,31 +958,31 @@ cmd_port_in_enable_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_port_in_enable_p_string =
+static cmdline_parse_token_string_t cmd_port_in_enable_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_port_in_enable_pipeline_id =
+static cmdline_parse_token_num_t cmd_port_in_enable_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_port_in_enable_port_string =
+static cmdline_parse_token_string_t cmd_port_in_enable_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, port_string,
"port");
-cmdline_parse_token_string_t cmd_port_in_enable_in_string =
+static cmdline_parse_token_string_t cmd_port_in_enable_in_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, in_string,
"in");
-cmdline_parse_token_num_t cmd_port_in_enable_port_in_id =
+static cmdline_parse_token_num_t cmd_port_in_enable_port_in_id =
TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, port_in_id,
UINT32);
-cmdline_parse_token_string_t cmd_port_in_enable_enable_string =
+static cmdline_parse_token_string_t cmd_port_in_enable_enable_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result,
enable_string, "enable");
-cmdline_parse_inst_t cmd_port_in_enable = {
+static cmdline_parse_inst_t cmd_port_in_enable = {
.f = cmd_port_in_enable_parsed,
.data = NULL,
.help_str = "Pipeline input port enable",
@@ -841,31 +1028,31 @@ cmd_port_in_disable_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_port_in_disable_p_string =
+static cmdline_parse_token_string_t cmd_port_in_disable_p_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, p_string,
"p");
-cmdline_parse_token_num_t cmd_port_in_disable_pipeline_id =
+static cmdline_parse_token_num_t cmd_port_in_disable_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_port_in_disable_port_string =
+static cmdline_parse_token_string_t cmd_port_in_disable_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, port_string,
"port");
-cmdline_parse_token_string_t cmd_port_in_disable_in_string =
+static cmdline_parse_token_string_t cmd_port_in_disable_in_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, in_string,
"in");
-cmdline_parse_token_num_t cmd_port_in_disable_port_in_id =
+static cmdline_parse_token_num_t cmd_port_in_disable_port_in_id =
TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, port_in_id,
UINT32);
-cmdline_parse_token_string_t cmd_port_in_disable_disable_string =
+static cmdline_parse_token_string_t cmd_port_in_disable_disable_string =
TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result,
disable_string, "disable");
-cmdline_parse_inst_t cmd_port_in_disable = {
+static cmdline_parse_inst_t cmd_port_in_disable = {
.f = cmd_port_in_disable_parsed,
.data = NULL,
.help_str = "Pipeline input port disable",
@@ -963,219 +1150,144 @@ print_link_info(struct app_link_params *p)
printf("\n");
}
-struct cmd_link_config_result {
- cmdline_fixed_string_t link_string;
- uint32_t link_id;
- cmdline_fixed_string_t config_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
-};
-
-static void
-cmd_link_config_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_link_config_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- uint32_t link_id = params->link_id;
- uint32_t ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- uint32_t depth = params->depth;
-
- status = app_link_config(app, link_id, ip, depth);
- if (status)
- printf("Command failed\n");
- else {
- struct app_link_params *p;
-
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
- print_link_info(p);
- }
-}
-
-cmdline_parse_token_string_t cmd_link_config_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_config_result, link_string,
- "link");
-
-cmdline_parse_token_num_t cmd_link_config_link_id =
- TOKEN_NUM_INITIALIZER(struct cmd_link_config_result, link_id, UINT32);
-
-cmdline_parse_token_string_t cmd_link_config_config_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_config_result, config_string,
- "config");
-
-cmdline_parse_token_ipaddr_t cmd_link_config_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_link_config_result, ip);
-
-cmdline_parse_token_num_t cmd_link_config_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_link_config_result, depth, UINT32);
-
-cmdline_parse_inst_t cmd_link_config = {
- .f = cmd_link_config_parsed,
- .data = NULL,
- .help_str = "Link configuration",
- .tokens = {
- (void *)&cmd_link_config_link_string,
- (void *)&cmd_link_config_link_id,
- (void *)&cmd_link_config_config_string,
- (void *)&cmd_link_config_ip,
- (void *)&cmd_link_config_depth,
- NULL,
- },
-};
-
/*
- * link up
+ * link
+ *
+ * link config:
+ * link <linkid> config <ipaddr> <depth>
+ *
+ * link up:
+ * link <linkid> up
+ *
+ * link down:
+ * link <linkid> down
+ *
+ * link ls:
+ * link ls
*/
-struct cmd_link_up_result {
+struct cmd_link_result {
cmdline_fixed_string_t link_string;
- uint32_t link_id;
- cmdline_fixed_string_t up_string;
+ cmdline_multi_string_t multi_string;
};
static void
-cmd_link_up_parsed(
+cmd_link_parsed(
void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
- void *data)
+ void *data)
{
- struct cmd_link_up_result *params = parsed_result;
+ struct cmd_link_result *params = parsed_result;
struct app_params *app = data;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
int status;
- status = app_link_up(app, params->link_id);
- if (status != 0)
- printf("Command failed\n");
- else {
- struct app_link_params *p;
+ uint32_t link_id;
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", params->link_id,
- p);
- print_link_info(p);
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status != 0) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "link");
+ return;
}
-}
-
-cmdline_parse_token_string_t cmd_link_up_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_up_result, link_string,
- "link");
-cmdline_parse_token_num_t cmd_link_up_link_id =
- TOKEN_NUM_INITIALIZER(struct cmd_link_up_result, link_id, UINT32);
+ /* link ls */
+ if ((n_tokens == 1) && (strcmp(tokens[0], "ls") == 0)) {
+ for (link_id = 0; link_id < app->n_links; link_id++) {
+ struct app_link_params *p;
-cmdline_parse_token_string_t cmd_link_up_up_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_up_result, up_string, "up");
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ print_link_info(p);
+ }
+ return;
+ } /* link ls */
-cmdline_parse_inst_t cmd_link_up = {
- .f = cmd_link_up_parsed,
- .data = NULL,
- .help_str = "Link UP",
- .tokens = {
- (void *)&cmd_link_up_link_string,
- (void *)&cmd_link_up_link_id,
- (void *)&cmd_link_up_up_string,
- NULL,
- },
-};
+ if (n_tokens < 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link");
+ return;
+ }
-/*
- * link down
- */
+ if (parser_read_uint32(&link_id, tokens[0])) {
+ printf(CMD_MSG_INVALID_ARG, "linkid");
+ return;
+ }
-struct cmd_link_down_result {
- cmdline_fixed_string_t link_string;
- uint32_t link_id;
- cmdline_fixed_string_t down_string;
-};
+ /* link config */
+ if (strcmp(tokens[1], "config") == 0) {
+ struct in_addr ipaddr_ipv4;
+ uint32_t depth;
-static void
-cmd_link_down_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_link_down_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link config");
+ return;
+ }
- status = app_link_down(app, params->link_id);
- if (status != 0)
- printf("Command failed\n");
- else {
- struct app_link_params *p;
+ if (parse_ipv4_addr(tokens[2], &ipaddr_ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", params->link_id,
- p);
- print_link_info(p);
- }
-}
+ if (parser_read_uint32(&depth, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
-cmdline_parse_token_string_t cmd_link_down_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_down_result, link_string,
- "link");
+ status = app_link_config(app,
+ link_id,
+ rte_be_to_cpu_32(ipaddr_ipv4.s_addr),
+ depth);
+ if (status)
+ printf(CMD_MSG_FAIL, "link config");
-cmdline_parse_token_num_t cmd_link_down_link_id =
- TOKEN_NUM_INITIALIZER(struct cmd_link_down_result, link_id, UINT32);
+ return;
+ } /* link config */
-cmdline_parse_token_string_t cmd_link_down_down_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_down_result, down_string,
- "down");
+ /* link up */
+ if (strcmp(tokens[1], "up") == 0) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link up");
+ return;
+ }
-cmdline_parse_inst_t cmd_link_down = {
- .f = cmd_link_down_parsed,
- .data = NULL,
- .help_str = "Link DOWN",
- .tokens = {
- (void *) &cmd_link_down_link_string,
- (void *) &cmd_link_down_link_id,
- (void *) &cmd_link_down_down_string,
- NULL,
- },
-};
+ status = app_link_up(app, link_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "link up");
-/*
- * link ls
- */
+ return;
+ } /* link up */
-struct cmd_link_ls_result {
- cmdline_fixed_string_t link_string;
- cmdline_fixed_string_t ls_string;
-};
+ /* link down */
+ if (strcmp(tokens[1], "down") == 0) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link down");
+ return;
+ }
-static void
-cmd_link_ls_parsed(
- __attribute__((unused)) void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct app_params *app = data;
- uint32_t link_id;
+ status = app_link_down(app, link_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "link down");
- for (link_id = 0; link_id < app->n_links; link_id++) {
- struct app_link_params *p;
+ return;
+ } /* link down */
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
- print_link_info(p);
- }
+ printf(CMD_MSG_MISMATCH_ARGS, "link");
}
-cmdline_parse_token_string_t cmd_link_ls_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_ls_result, link_string,
- "link");
+static cmdline_parse_token_string_t cmd_link_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_result, link_string, "link");
-cmdline_parse_token_string_t cmd_link_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_ls_result, ls_string, "ls");
+static cmdline_parse_token_string_t cmd_link_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_inst_t cmd_link_ls = {
- .f = cmd_link_ls_parsed,
+static cmdline_parse_inst_t cmd_link = {
+ .f = cmd_link_parsed,
.data = NULL,
- .help_str = "Link list",
+ .help_str = "link config / up / down / ls",
.tokens = {
- (void *)&cmd_link_ls_link_string,
- (void *)&cmd_link_ls_ls_string,
+ (void *) &cmd_link_link_string,
+ (void *) &cmd_link_multi_string,
NULL,
},
};
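
The hunk above is the template for every command consolidation in this patch: one fixed leading keyword plus a TOKEN_STRING_MULTI tail that the handler splits itself. The following is a minimal sketch of that pattern, not code from the patch; the command name cmd_foo and its help string are hypothetical, and the DPDK cmdline headers plus the app's parser.h helper used throughout this patch are assumed.

    #include <stdint.h>
    #include <stdio.h>

    #include <rte_common.h>           /* RTE_DIM */
    #include <cmdline_parse.h>
    #include <cmdline_parse_string.h>

    #include "parser.h"               /* parse_tokenize_string() */

    struct cmd_foo_result {
        cmdline_fixed_string_t foo_string;   /* matches the literal "foo" */
        cmdline_multi_string_t multi_string; /* everything after it */
    };

    static void
    cmd_foo_parsed(void *parsed_result,
        __attribute__((unused)) struct cmdline *cl,
        __attribute__((unused)) void *data)
    {
        struct cmd_foo_result *params = parsed_result;
        char *tokens[16];
        uint32_t n_tokens = RTE_DIM(tokens);

        /* Fails only when the tail holds more than 16 words */
        if (parse_tokenize_string(params->multi_string, tokens, &n_tokens)) {
            printf("Too many arguments\n");
            return;
        }

        /* Dispatch on tokens[0], tokens[1], ... exactly as
         * cmd_link_parsed() does for "config" / "up" / "down" / "ls" */
    }

    static cmdline_parse_token_string_t cmd_foo_foo_string =
        TOKEN_STRING_INITIALIZER(struct cmd_foo_result, foo_string, "foo");

    static cmdline_parse_token_string_t cmd_foo_multi_string =
        TOKEN_STRING_INITIALIZER(struct cmd_foo_result, multi_string,
        TOKEN_STRING_MULTI);

    static cmdline_parse_inst_t cmd_foo = {
        .f = cmd_foo_parsed,
        .data = NULL,
        .help_str = "foo <args...>",
        .tokens = {
            (void *) &cmd_foo_foo_string,
            (void *) &cmd_foo_multi_string,
            NULL,
        },
    };

One parse instance now covers an open-ended family of sub-commands, at the cost of moving argument validation from the cmdline library into the handler.
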
@@ -1212,6 +1324,11 @@ static cmdline_parse_inst_t cmd_quit = {
/*
* run
+ *
+ * run <file>
+ * run <file> [<count> [<interval>]]
+ * <count> default is 1
+ * <interval> is measured in milliseconds, default is 1 second
*/
static void
@@ -1233,9 +1350,9 @@ app_run_file(
close(fd);
}
-struct cmd_run_file_result {
+struct cmd_run_result {
cmdline_fixed_string_t run_string;
- char file_name[APP_FILE_NAME_SIZE];
+ cmdline_multi_string_t multi_string;
};
static void
@@ -1244,25 +1361,87 @@ cmd_run_parsed(
struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_run_file_result *params = parsed_result;
+ struct cmd_run_result *params = parsed_result;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ char *file_name;
+ uint32_t count, interval, i;
- app_run_file(cl->ctx, params->file_name);
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "run");
+ return;
+ }
+
+ switch (n_tokens) {
+ case 0:
+ printf(CMD_MSG_NOT_ENOUGH_ARGS, "run");
+ return;
+
+ case 1:
+ file_name = tokens[0];
+ count = 1;
+ interval = 1000;
+ break;
+
+ case 2:
+ file_name = tokens[0];
+
+ if (parser_read_uint32(&count, tokens[1]) ||
+ (count == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "count");
+ return;
+ }
+
+ interval = 1000;
+ break;
+
+ case 3:
+ file_name = tokens[0];
+
+ if (parser_read_uint32(&count, tokens[1]) ||
+ (count == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "count");
+ return;
+ }
+
+ if (parser_read_uint32(&interval, tokens[2]) ||
+ (interval == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "interval");
+ return;
+ }
+ break;
+
+ default:
+ printf(CMD_MSG_MISMATCH_ARGS, "run");
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ app_run_file(cl->ctx, file_name);
+ if (interval)
+ usleep(interval * 1000);
+ }
}
-cmdline_parse_token_string_t cmd_run_run_string =
- TOKEN_STRING_INITIALIZER(struct cmd_run_file_result, run_string,
- "run");
+static cmdline_parse_token_string_t cmd_run_run_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_result, run_string, "run");
+
+static cmdline_parse_token_string_t cmd_run_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_token_string_t cmd_run_file_name =
- TOKEN_STRING_INITIALIZER(struct cmd_run_file_result, file_name, NULL);
-cmdline_parse_inst_t cmd_run = {
+static cmdline_parse_inst_t cmd_run = {
.f = cmd_run_parsed,
.data = NULL,
.help_str = "Run CLI script file",
.tokens = {
(void *) &cmd_run_run_string,
- (void *) &cmd_run_file_name,
+ (void *) &cmd_run_multi_string,
NULL,
},
};
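
With the reworked run command above, a CLI script can be replayed. Assuming the application's usual pipeline> prompt and a hypothetical script path, the first form below keeps the defaults (count = 1, interval = 1000 ms) while the second replays the script ten times with 500 ms between iterations; the handler converts the millisecond interval to microseconds for usleep().

    pipeline> run ./script.cli
    pipeline> run ./script.cli 10 500
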
@@ -1270,12 +1449,7 @@ cmdline_parse_inst_t cmd_run = {
static cmdline_parse_ctx_t pipeline_common_cmds[] = {
(cmdline_parse_inst_t *) &cmd_quit,
(cmdline_parse_inst_t *) &cmd_run,
-
- (cmdline_parse_inst_t *) &cmd_link_config,
- (cmdline_parse_inst_t *) &cmd_link_up,
- (cmdline_parse_inst_t *) &cmd_link_down,
- (cmdline_parse_inst_t *) &cmd_link_ls,
-
+ (cmdline_parse_inst_t *) &cmd_link,
(cmdline_parse_inst_t *) &cmd_ping,
(cmdline_parse_inst_t *) &cmd_stats_port_in,
(cmdline_parse_inst_t *) &cmd_stats_port_out,
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.h b/examples/ip_pipeline/pipeline/pipeline_common_fe.h
index cfad963d..ce0bf13e 100644
--- a/examples/ip_pipeline/pipeline/pipeline_common_fe.h
+++ b/examples/ip_pipeline/pipeline/pipeline_common_fe.h
@@ -182,6 +182,16 @@ app_msg_send_recv(struct app_params *app,
return msg_recv;
}
+struct app_link_params *
+app_pipeline_track_pktq_out_to_link(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t pktq_out_id);
+
+int
+app_pipeline_track_default(struct pipeline_params *params,
+ uint32_t port_in,
+ uint32_t *port_out);
+
int
app_pipeline_ping(struct app_params *app,
uint32_t pipeline_id);
@@ -215,6 +225,13 @@ app_pipeline_port_in_disable(struct app_params *app,
uint32_t port_id);
int
+app_link_set_op(struct app_params *app,
+ uint32_t link_id,
+ uint32_t pipeline_id,
+ app_link_op op,
+ void *arg);
+
+int
app_link_config(struct app_params *app,
uint32_t link_id,
uint32_t ip,
@@ -231,4 +248,13 @@ app_link_down(struct app_params *app,
int
app_pipeline_common_cmd_push(struct app_params *app);
+#define CMD_MSG_OUT_OF_MEMORY "Not enough memory\n"
+#define CMD_MSG_NOT_ENOUGH_ARGS "Not enough arguments for command \"%s\"\n"
+#define CMD_MSG_TOO_MANY_ARGS "Too many arguments for command \"%s\"\n"
+#define CMD_MSG_MISMATCH_ARGS "Incorrect set of arguments for command \"%s\"\n"
+#define CMD_MSG_INVALID_ARG "Invalid value for argument \"%s\"\n"
+#define CMD_MSG_ARG_NOT_FOUND "Syntax error: \"%s\" not found\n"
+#define CMD_MSG_FILE_ERR "Error in file \"%s\" at line %u\n"
+#define CMD_MSG_FAIL "Command \"%s\" failed\n"
+
#endif
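
The CMD_MSG_* strings above are ordinary printf() format strings: each takes the command or argument name as an argument, and CMD_MSG_FILE_ERR additionally takes a line number. A small usage sketch follows; the surrounding function is hypothetical.

    #include <stdio.h>

    #include "pipeline_common_fe.h"

    static void
    report_errors_example(void)
    {
        /* Prints: Invalid value for argument "count" */
        printf(CMD_MSG_INVALID_ARG, "count");

        /* Prints: Error in file "rules.txt" at line 7 */
        printf(CMD_MSG_FILE_ERR, "rules.txt", 7u);
    }

Centralizing the wording keeps the many per-token error paths in the handlers consistent.
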
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall.c b/examples/ip_pipeline/pipeline/pipeline_firewall.c
index fd897d5c..a82e552d 100644
--- a/examples/ip_pipeline/pipeline/pipeline_firewall.c
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall.c
@@ -30,9 +30,11 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
+#include <errno.h>
#include <stdio.h>
#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
#include <sys/queue.h>
#include <netinet/in.h>
@@ -43,15 +45,11 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
-#include <cmdline_socket.h>
#include "app.h"
#include "pipeline_common_fe.h"
#include "pipeline_firewall.h"
-
-#define BUF_SIZE 1024
+#include "parser.h"
struct app_pipeline_firewall_rule {
struct pipeline_firewall_key key;
@@ -75,18 +73,6 @@ struct app_pipeline_firewall {
void *default_rule_entry_ptr;
};
-struct app_pipeline_add_bulk_params {
- struct pipeline_firewall_key *keys;
- uint32_t n_keys;
- uint32_t *priorities;
- uint32_t *port_ids;
-};
-
-struct app_pipeline_del_bulk_params {
- struct pipeline_firewall_key *keys;
- uint32_t n_keys;
-};
-
static void
print_firewall_ipv4_rule(struct app_pipeline_firewall_rule *rule)
{
@@ -272,356 +258,118 @@ app_pipeline_firewall_key_check_and_normalize(struct pipeline_firewall_key *key)
}
}
-static int
-app_pipeline_add_bulk_parse_file(char *filename,
- struct app_pipeline_add_bulk_params *params)
+int
+app_pipeline_firewall_load_file(char *filename,
+ struct pipeline_firewall_key *keys,
+ uint32_t *priorities,
+ uint32_t *port_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
{
- FILE *f;
- char file_buf[BUF_SIZE];
- uint32_t i;
- int status = 0;
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
- f = fopen(filename, "r");
- if (f == NULL)
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (priorities == NULL) ||
+ (port_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
return -1;
-
- params->n_keys = 0;
- while (fgets(file_buf, BUF_SIZE, f) != NULL)
- params->n_keys++;
- rewind(f);
-
- if (params->n_keys == 0) {
- status = -1;
- goto end;
- }
-
- params->keys = rte_malloc(NULL,
- params->n_keys * sizeof(struct pipeline_firewall_key),
- RTE_CACHE_LINE_SIZE);
- if (params->keys == NULL) {
- status = -1;
- goto end;
- }
-
- params->priorities = rte_malloc(NULL,
- params->n_keys * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (params->priorities == NULL) {
- status = -1;
- goto end;
- }
-
- params->port_ids = rte_malloc(NULL,
- params->n_keys * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (params->port_ids == NULL) {
- status = -1;
- goto end;
- }
-
- i = 0;
- while (fgets(file_buf, BUF_SIZE, f) != NULL) {
- char *str;
-
- str = strtok(file_buf, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->priorities[i] = atoi(str);
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip = atoi(str)<<24;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<16;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<8;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
}
- params->keys[i].key.ipv4_5tuple.src_ip_mask = atoi(str);
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip = atoi(str)<<24;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<16;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<8;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip_mask = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_port_from = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_port_to = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_port_from = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_port_to = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.proto = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- /* Need to add 2 to str to skip leading 0x */
- params->keys[i].key.ipv4_5tuple.proto_mask = strtol(str+2, NULL, 16);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->port_ids[i] = atoi(str);
- params->keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
-
- i++;
- }
-
-end:
- fclose(f);
- return status;
-}
-
-static int
-app_pipeline_del_bulk_parse_file(char *filename,
- struct app_pipeline_del_bulk_params *params)
-{
- FILE *f;
- char file_buf[BUF_SIZE];
- uint32_t i;
- int status = 0;
+ /* Open input file */
f = fopen(filename, "r");
- if (f == NULL)
+ if (f == NULL) {
+ *line = 0;
return -1;
-
- params->n_keys = 0;
- while (fgets(file_buf, BUF_SIZE, f) != NULL)
- params->n_keys++;
- rewind(f);
-
- if (params->n_keys == 0) {
- status = -1;
- goto end;
- }
-
- params->keys = rte_malloc(NULL,
- params->n_keys * sizeof(struct pipeline_firewall_key),
- RTE_CACHE_LINE_SIZE);
- if (params->keys == NULL) {
- status = -1;
- goto end;
}
- i = 0;
- while (fgets(file_buf, BUF_SIZE, f) != NULL) {
- char *str;
-
- str = strtok(file_buf, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip = atoi(str)<<24;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<16;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str)<<8;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip |= atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_ip_mask = atoi(str);
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip = atoi(str)<<24;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<16;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str)<<8;
-
- str = strtok(NULL, " .");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip |= atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_ip_mask = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_port_from = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.src_port_to = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_port_from = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.dst_port_to = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- params->keys[i].key.ipv4_5tuple.proto = atoi(str);
-
- str = strtok(NULL, " ");
- if (str == NULL) {
- status = -1;
- goto end;
- }
- /* Need to add 2 to str to skip leading 0x */
- params->keys[i].key.ipv4_5tuple.proto_mask = strtol(str+2, NULL, 16);
-
- params->keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ uint32_t priority = 0;
+ struct in_addr sipaddr;
+ uint32_t sipdepth = 0;
+ struct in_addr dipaddr;
+ uint32_t dipdepth = 0;
+ uint16_t sport0 = 0;
+ uint16_t sport1 = 0;
+ uint16_t dport0 = 0;
+ uint16_t dport1 = 0;
+ uint8_t proto = 0;
+ uint8_t protomask = 0;
+ uint32_t port_id = 0;
+
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 15) ||
+ strcmp(tokens[0], "priority") ||
+ parser_read_uint32(&priority, tokens[1]) ||
+ strcmp(tokens[2], "ipv4") ||
+ parse_ipv4_addr(tokens[3], &sipaddr) ||
+ parser_read_uint32(&sipdepth, tokens[4]) ||
+ parse_ipv4_addr(tokens[5], &dipaddr) ||
+ parser_read_uint32(&dipdepth, tokens[6]) ||
+ parser_read_uint16(&sport0, tokens[7]) ||
+ parser_read_uint16(&sport1, tokens[8]) ||
+ parser_read_uint16(&dport0, tokens[9]) ||
+ parser_read_uint16(&dport1, tokens[10]) ||
+ parser_read_uint8(&proto, tokens[11]) ||
+ parser_read_uint8_hex(&protomask, tokens[12]) ||
+ strcmp(tokens[13], "port") ||
+ parser_read_uint32(&port_id, tokens[14]))
+ goto error1;
+
+ keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ keys[i].key.ipv4_5tuple.src_ip =
+ rte_be_to_cpu_32(sipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.src_ip_mask = sipdepth;
+ keys[i].key.ipv4_5tuple.dst_ip =
+ rte_be_to_cpu_32(dipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ keys[i].key.ipv4_5tuple.src_port_from = sport0;
+ keys[i].key.ipv4_5tuple.src_port_to = sport1;
+ keys[i].key.ipv4_5tuple.dst_port_from = dport0;
+ keys[i].key.ipv4_5tuple.dst_port_to = dport1;
+ keys[i].key.ipv4_5tuple.proto = proto;
+ keys[i].key.ipv4_5tuple.proto_mask = protomask;
+
+ port_ids[i] = port_id;
+ priorities[i] = priority;
+
+ if (app_pipeline_firewall_key_check_and_normalize(&keys[i]))
+ goto error1;
i++;
}
- for (i = 0; i < params->n_keys; i++) {
- if (app_pipeline_firewall_key_check_and_normalize(&params->keys[i]) != 0) {
- status = -1;
- goto end;
- }
- }
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
-end:
+error1:
+ *line = l;
fclose(f);
- return status;
+ return -1;
}
int
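
app_pipeline_firewall_load_file() above accepts at most *n_keys rules, skips empty lines and lines whose first token starts with '#', and requires exactly 15 tokens per rule. A hypothetical rule file line that satisfies this parser is shown below; the protocol mask is read with parser_read_uint8_hex(), hence the hex notation.

    # priority <prio> ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
    #          <sport0> <sport1> <dport0> <dport1> <proto> <protomask> port <portid>
    priority 1 ipv4 0.0.0.0 0 100.0.0.0 8 0 65535 0 65535 6 0xF port 0

On error the function reports the offending line number through *line, which the callers turn into a CMD_MSG_FILE_ERR message.
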
@@ -804,14 +552,14 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
return -1;
rules = rte_malloc(NULL,
- n_keys * sizeof(struct app_pipeline_firewall_rule *),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(struct app_pipeline_firewall_rule *),
+ RTE_CACHE_LINE_SIZE);
if (rules == NULL)
return -1;
new_rules = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
if (new_rules == NULL) {
rte_free(rules);
return -1;
@@ -834,8 +582,9 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
rules[i] = app_pipeline_firewall_rule_find(p, &keys[i]);
new_rules[i] = (rules[i] == NULL);
if (rules[i] == NULL) {
- rules[i] = rte_malloc(NULL, sizeof(*rules[i]),
- RTE_CACHE_LINE_SIZE);
+ rules[i] = rte_malloc(NULL,
+ sizeof(*rules[i]),
+ RTE_CACHE_LINE_SIZE);
if (rules[i] == NULL) {
uint32_t j;
@@ -852,8 +601,8 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
}
keys_found = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
if (keys_found == NULL) {
uint32_t j;
@@ -867,8 +616,8 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
}
entries_ptr = rte_malloc(NULL,
- n_keys * sizeof(struct rte_pipeline_table_entry *),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(struct rte_pipeline_table_entry *),
+ RTE_CACHE_LINE_SIZE);
if (entries_ptr == NULL) {
uint32_t j;
@@ -883,8 +632,8 @@ app_pipeline_firewall_add_bulk(struct app_params *app,
}
for (i = 0; i < n_keys; i++) {
entries_ptr[i] = rte_malloc(NULL,
- sizeof(struct rte_pipeline_table_entry),
- RTE_CACHE_LINE_SIZE);
+ sizeof(struct rte_pipeline_table_entry),
+ RTE_CACHE_LINE_SIZE);
if (entries_ptr[i] == NULL) {
uint32_t j;
@@ -1030,8 +779,8 @@ app_pipeline_firewall_delete_bulk(struct app_params *app,
return -1;
rules = rte_malloc(NULL,
- n_keys * sizeof(struct app_pipeline_firewall_rule *),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(struct app_pipeline_firewall_rule *),
+ RTE_CACHE_LINE_SIZE);
if (rules == NULL)
return -1;
@@ -1044,8 +793,8 @@ app_pipeline_firewall_delete_bulk(struct app_params *app,
}
keys_found = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
if (keys_found == NULL) {
rte_free(rules);
return -1;
@@ -1197,668 +946,500 @@ app_pipeline_firewall_delete_default_rule(struct app_params *app,
}
/*
- * p firewall add ipv4
+ * firewall
+ *
+ * firewall add:
+ * p <pipelineid> firewall add priority <priority>
+ * ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
+ * <sport0> <sport1> <dport0> <dport1> <proto> <protomask>
+ * port <portid>
+ * Note: <protomask> is a hex value
+ *
+ * p <pipelineid> firewall add bulk <file>
+ *
+ * firewall add default:
+ * p <pipelineid> firewall add default <portid>
+ *
+ * firewall del:
+ * p <pipelineid> firewall del
+ * ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
+ * <sport0> <sport1> <dport0> <dport1> <proto> <protomask>
+ *
+ * p <pipelineid> firewall del bulk <file>
+ *
+ * firewall del default:
+ * p <pipelineid> firewall del default
+ *
+ * firewall ls:
+ * p <pipelineid> firewall ls
*/
-struct cmd_firewall_add_ipv4_result {
+struct cmd_firewall_result {
cmdline_fixed_string_t p_string;
uint32_t pipeline_id;
cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv4_string;
- int32_t priority;
- cmdline_ipaddr_t src_ip;
- uint32_t src_ip_mask;
- cmdline_ipaddr_t dst_ip;
- uint32_t dst_ip_mask;
- uint16_t src_port_from;
- uint16_t src_port_to;
- uint16_t dst_port_from;
- uint16_t dst_port_to;
- uint8_t proto;
- uint8_t proto_mask;
- uint8_t port_id;
+ cmdline_multi_string_t multi_string;
};
-static void
-cmd_firewall_add_ipv4_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
+static void cmd_firewall_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
void *data)
{
- struct cmd_firewall_add_ipv4_result *params = parsed_result;
+ struct cmd_firewall_result *params = parsed_result;
struct app_params *app = data;
- struct pipeline_firewall_key key;
int status;
- key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
- key.key.ipv4_5tuple.src_ip = rte_bswap32(
- (uint32_t) params->src_ip.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.src_ip_mask = params->src_ip_mask;
- key.key.ipv4_5tuple.dst_ip = rte_bswap32(
- (uint32_t) params->dst_ip.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.dst_ip_mask = params->dst_ip_mask;
- key.key.ipv4_5tuple.src_port_from = params->src_port_from;
- key.key.ipv4_5tuple.src_port_to = params->src_port_to;
- key.key.ipv4_5tuple.dst_port_from = params->dst_port_from;
- key.key.ipv4_5tuple.dst_port_to = params->dst_port_to;
- key.key.ipv4_5tuple.proto = params->proto;
- key.key.ipv4_5tuple.proto_mask = params->proto_mask;
-
- status = app_pipeline_firewall_add_rule(app,
- params->pipeline_id,
- &key,
- params->priority,
- params->port_id);
-
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
-}
-
-cmdline_parse_token_string_t cmd_firewall_add_ipv4_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result, p_string,
- "p");
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_firewall_add_ipv4_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- firewall_string, "firewall");
-
-cmdline_parse_token_string_t cmd_firewall_add_ipv4_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- add_string, "add");
-
-cmdline_parse_token_string_t cmd_firewall_add_ipv4_ipv4_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- ipv4_string, "ipv4");
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_priority =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, priority,
- INT32);
-
-cmdline_parse_token_ipaddr_t cmd_firewall_add_ipv4_src_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_firewall_add_ipv4_result, src_ip);
+ char *tokens[17];
+ uint32_t n_tokens = RTE_DIM(tokens);
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_ip_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, src_ip_mask,
- UINT32);
-
-cmdline_parse_token_ipaddr_t cmd_firewall_add_ipv4_dst_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_firewall_add_ipv4_result, dst_ip);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_ip_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result, dst_ip_mask,
- UINT32);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_port_from =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- src_port_from, UINT16);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_src_port_to =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- src_port_to, UINT16);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_port_from =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- dst_port_from, UINT16);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_dst_port_to =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- dst_port_to, UINT16);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- proto, UINT8);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_proto_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- proto_mask, UINT8);
-
-cmdline_parse_token_num_t cmd_firewall_add_ipv4_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_ipv4_result,
- port_id, UINT8);
-
-cmdline_parse_inst_t cmd_firewall_add_ipv4 = {
- .f = cmd_firewall_add_ipv4_parsed,
- .data = NULL,
- .help_str = "Firewall rule add",
- .tokens = {
- (void *) &cmd_firewall_add_ipv4_p_string,
- (void *) &cmd_firewall_add_ipv4_pipeline_id,
- (void *) &cmd_firewall_add_ipv4_firewall_string,
- (void *) &cmd_firewall_add_ipv4_add_string,
- (void *) &cmd_firewall_add_ipv4_ipv4_string,
- (void *) &cmd_firewall_add_ipv4_priority,
- (void *) &cmd_firewall_add_ipv4_src_ip,
- (void *) &cmd_firewall_add_ipv4_src_ip_mask,
- (void *) &cmd_firewall_add_ipv4_dst_ip,
- (void *) &cmd_firewall_add_ipv4_dst_ip_mask,
- (void *) &cmd_firewall_add_ipv4_src_port_from,
- (void *) &cmd_firewall_add_ipv4_src_port_to,
- (void *) &cmd_firewall_add_ipv4_dst_port_from,
- (void *) &cmd_firewall_add_ipv4_dst_port_to,
- (void *) &cmd_firewall_add_ipv4_proto,
- (void *) &cmd_firewall_add_ipv4_proto_mask,
- (void *) &cmd_firewall_add_ipv4_port_id,
- NULL,
- },
-};
-
-/*
- * p firewall del ipv4
- */
-
-struct cmd_firewall_del_ipv4_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t ipv4_string;
- cmdline_ipaddr_t src_ip;
- uint32_t src_ip_mask;
- cmdline_ipaddr_t dst_ip;
- uint32_t dst_ip_mask;
- uint16_t src_port_from;
- uint16_t src_port_to;
- uint16_t dst_port_from;
- uint16_t dst_port_to;
- uint8_t proto;
- uint8_t proto_mask;
-};
-
-static void
-cmd_firewall_del_ipv4_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_del_ipv4_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_firewall_key key;
- int status;
-
- key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
- key.key.ipv4_5tuple.src_ip = rte_bswap32(
- (uint32_t) params->src_ip.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.src_ip_mask = params->src_ip_mask;
- key.key.ipv4_5tuple.dst_ip = rte_bswap32(
- (uint32_t) params->dst_ip.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.dst_ip_mask = params->dst_ip_mask;
- key.key.ipv4_5tuple.src_port_from = params->src_port_from;
- key.key.ipv4_5tuple.src_port_to = params->src_port_to;
- key.key.ipv4_5tuple.dst_port_from = params->dst_port_from;
- key.key.ipv4_5tuple.dst_port_to = params->dst_port_to;
- key.key.ipv4_5tuple.proto = params->proto;
- key.key.ipv4_5tuple.proto_mask = params->proto_mask;
-
- status = app_pipeline_firewall_delete_rule(app,
- params->pipeline_id,
- &key);
-
- if (status != 0) {
- printf("Command failed\n");
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "firewall");
return;
}
-}
-
-cmdline_parse_token_string_t cmd_firewall_del_ipv4_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result, p_string,
- "p");
-
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_firewall_del_ipv4_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- firewall_string, "firewall");
-
-cmdline_parse_token_string_t cmd_firewall_del_ipv4_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- del_string, "del");
-cmdline_parse_token_string_t cmd_firewall_del_ipv4_ipv4_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- ipv4_string, "ipv4");
-
-cmdline_parse_token_ipaddr_t cmd_firewall_del_ipv4_src_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_ip);
-
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_ip_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_ip_mask,
- UINT32);
-
-cmdline_parse_token_ipaddr_t cmd_firewall_del_ipv4_dst_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_firewall_del_ipv4_result, dst_ip);
-
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_ip_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, dst_ip_mask,
- UINT32);
+ /* firewall add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "priority") == 0)) {
+ struct pipeline_firewall_key key;
+ uint32_t priority;
+ struct in_addr sipaddr;
+ uint32_t sipdepth;
+ struct in_addr dipaddr;
+ uint32_t dipdepth;
+ uint16_t sport0;
+ uint16_t sport1;
+ uint16_t dport0;
+ uint16_t dport1;
+ uint8_t proto;
+ uint8_t protomask;
+ uint32_t port_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 16) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_port_from =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- src_port_from, UINT16);
+ if (parser_read_uint32(&priority, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "priority");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_src_port_to =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, src_port_to,
- UINT16);
+ if (strcmp(tokens[3], "ipv4")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "ipv4");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_port_from =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- dst_port_from, UINT16);
+ if (parse_ipv4_addr(tokens[4], &sipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "sipaddr");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_dst_port_to =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- dst_port_to, UINT16);
+ if (parser_read_uint32(&sipdepth, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "sipdepth");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result,
- proto, UINT8);
+ if (parse_ipv4_addr(tokens[6], &dipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "dipaddr");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_ipv4_proto_mask =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_ipv4_result, proto_mask,
- UINT8);
+ if (parser_read_uint32(&dipdepth, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "dipdepth");
+ return;
+ }
-cmdline_parse_inst_t cmd_firewall_del_ipv4 = {
- .f = cmd_firewall_del_ipv4_parsed,
- .data = NULL,
- .help_str = "Firewall rule delete",
- .tokens = {
- (void *) &cmd_firewall_del_ipv4_p_string,
- (void *) &cmd_firewall_del_ipv4_pipeline_id,
- (void *) &cmd_firewall_del_ipv4_firewall_string,
- (void *) &cmd_firewall_del_ipv4_del_string,
- (void *) &cmd_firewall_del_ipv4_ipv4_string,
- (void *) &cmd_firewall_del_ipv4_src_ip,
- (void *) &cmd_firewall_del_ipv4_src_ip_mask,
- (void *) &cmd_firewall_del_ipv4_dst_ip,
- (void *) &cmd_firewall_del_ipv4_dst_ip_mask,
- (void *) &cmd_firewall_del_ipv4_src_port_from,
- (void *) &cmd_firewall_del_ipv4_src_port_to,
- (void *) &cmd_firewall_del_ipv4_dst_port_from,
- (void *) &cmd_firewall_del_ipv4_dst_port_to,
- (void *) &cmd_firewall_del_ipv4_proto,
- (void *) &cmd_firewall_del_ipv4_proto_mask,
- NULL,
- },
-};
+ if (parser_read_uint16(&sport0, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "sport0");
+ return;
+ }
-/*
- * p firewall add bulk
- */
+ if (parser_read_uint16(&sport1, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "sport1");
+ return;
+ }
-struct cmd_firewall_add_bulk_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t bulk_string;
- cmdline_fixed_string_t file_path;
-};
+ if (parser_read_uint16(&dport0, tokens[10])) {
+ printf(CMD_MSG_INVALID_ARG, "dport0");
+ return;
+ }
-static void
-cmd_firewall_add_bulk_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_add_bulk_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ if (parser_read_uint16(&dport1, tokens[11])) {
+ printf(CMD_MSG_INVALID_ARG, "dport1");
+ return;
+ }
- struct app_pipeline_add_bulk_params add_bulk_params;
+ if (parser_read_uint8(&proto, tokens[12])) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
- status = app_pipeline_add_bulk_parse_file(params->file_path, &add_bulk_params);
- if (status != 0) {
- printf("Command failed\n");
- goto end;
- }
+ if (parser_read_uint8_hex(&protomask, tokens[13])) {
+ printf(CMD_MSG_INVALID_ARG, "protomask");
+ return;
+ }
- status = app_pipeline_firewall_add_bulk(app, params->pipeline_id, add_bulk_params.keys,
- add_bulk_params.n_keys, add_bulk_params.priorities, add_bulk_params.port_ids);
- if (status != 0) {
- printf("Command failed\n");
- goto end;
- }
+ if (strcmp(tokens[14], "port")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-end:
- rte_free(add_bulk_params.keys);
- rte_free(add_bulk_params.priorities);
- rte_free(add_bulk_params.port_ids);
-}
+ if (parser_read_uint32(&port_id, tokens[15])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_add_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result, p_string,
- "p");
+ key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.src_ip = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.src_ip_mask = sipdepth;
+ key.key.ipv4_5tuple.dst_ip = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ key.key.ipv4_5tuple.src_port_from = sport0;
+ key.key.ipv4_5tuple.src_port_to = sport1;
+ key.key.ipv4_5tuple.dst_port_from = dport0;
+ key.key.ipv4_5tuple.dst_port_to = dport1;
+ key.key.ipv4_5tuple.proto = proto;
+ key.key.ipv4_5tuple.proto_mask = protomask;
+
+ status = app_pipeline_firewall_add_rule(app,
+ params->pipeline_id,
+ &key,
+ priority,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add");
-cmdline_parse_token_num_t cmd_firewall_add_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_bulk_result, pipeline_id,
- UINT32);
+ return;
+ } /* firewall add */
+
+ /* firewall add bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_firewall_key *keys;
+ uint32_t *priorities, *port_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add bulk");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_add_bulk_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
- firewall_string, "firewall");
+ filename = tokens[2];
-cmdline_parse_token_string_t cmd_firewall_add_bulk_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
- add_string, "add");
+ n_keys = APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_firewall_key));
+ if (keys == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
+ memset(keys, 0, n_keys * sizeof(struct pipeline_firewall_key));
-cmdline_parse_token_string_t cmd_firewall_add_bulk_bulk_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
- bulk_string, "bulk");
+ priorities = malloc(n_keys * sizeof(uint32_t));
+ if (priorities == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(keys);
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_add_bulk_file_path_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_bulk_result,
- file_path, NULL);
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(priorities);
+ free(keys);
+ return;
+ }
-cmdline_parse_inst_t cmd_firewall_add_bulk = {
- .f = cmd_firewall_add_bulk_parsed,
- .data = NULL,
- .help_str = "Firewall rule add bulk",
- .tokens = {
- (void *) &cmd_firewall_add_bulk_p_string,
- (void *) &cmd_firewall_add_bulk_pipeline_id,
- (void *) &cmd_firewall_add_bulk_firewall_string,
- (void *) &cmd_firewall_add_bulk_add_string,
- (void *) &cmd_firewall_add_bulk_bulk_string,
- (void *) &cmd_firewall_add_bulk_file_path_string,
- NULL,
- },
-};
+ status = app_pipeline_firewall_load_file(filename,
+ keys,
+ priorities,
+ port_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(port_ids);
+ free(priorities);
+ free(keys);
+ return;
+ }
-/*
- * p firewall del bulk
- */
+ status = app_pipeline_firewall_add_bulk(app,
+ params->pipeline_id,
+ keys,
+ n_keys,
+ priorities,
+ port_ids);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add bulk");
+
+ free(keys);
+ free(priorities);
+ free(port_ids);
+ return;
+ } /* firewall add bulk */
-struct cmd_firewall_del_bulk_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t bulk_string;
- cmdline_fixed_string_t file_path;
-};
+ /* firewall add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
-static void
-cmd_firewall_del_bulk_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_del_bulk_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add default");
+ return;
+ }
- struct app_pipeline_del_bulk_params del_bulk_params;
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
- status = app_pipeline_del_bulk_parse_file(params->file_path, &del_bulk_params);
- if (status != 0) {
- printf("Command failed\n");
- goto end;
- }
+ status = app_pipeline_firewall_add_default_rule(app,
+ params->pipeline_id,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add default");
- status = app_pipeline_firewall_delete_bulk(app, params->pipeline_id,
- del_bulk_params.keys, del_bulk_params.n_keys);
- if (status != 0) {
- printf("Command failed\n");
- goto end;
- }
+ return;
+ } /* firewall add default */
+
+ /* firewall del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0)) {
+ struct pipeline_firewall_key key;
+ struct in_addr sipaddr;
+ uint32_t sipdepth;
+ struct in_addr dipaddr;
+ uint32_t dipdepth;
+ uint16_t sport0;
+ uint16_t sport1;
+ uint16_t dport0;
+ uint16_t dport1;
+ uint8_t proto;
+ uint8_t protomask;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 12) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del");
+ return;
+ }
-end:
- rte_free(del_bulk_params.keys);
-}
+ if (parse_ipv4_addr(tokens[2], &sipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "sipaddr");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result, p_string,
- "p");
+ if (parser_read_uint32(&sipdepth, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "sipdepth");
+ return;
+ }
-cmdline_parse_token_num_t cmd_firewall_del_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_bulk_result, pipeline_id,
- UINT32);
+ if (parse_ipv4_addr(tokens[4], &dipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "dipaddr");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
- firewall_string, "firewall");
+ if (parser_read_uint32(&dipdepth, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "dipdepth");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
- del_string, "del");
+ if (parser_read_uint16(&sport0, tokens[6])) {
+ printf(CMD_MSG_INVALID_ARG, "sport0");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_bulk_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
- bulk_string, "bulk");
+ if (parser_read_uint16(&sport1, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "sport1");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_bulk_file_path_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_bulk_result,
- file_path, NULL);
+ if (parser_read_uint16(&dport0, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "dport0");
+ return;
+ }
-cmdline_parse_inst_t cmd_firewall_del_bulk = {
- .f = cmd_firewall_del_bulk_parsed,
- .data = NULL,
- .help_str = "Firewall rule del bulk",
- .tokens = {
- (void *) &cmd_firewall_del_bulk_p_string,
- (void *) &cmd_firewall_del_bulk_pipeline_id,
- (void *) &cmd_firewall_del_bulk_firewall_string,
- (void *) &cmd_firewall_del_bulk_add_string,
- (void *) &cmd_firewall_del_bulk_bulk_string,
- (void *) &cmd_firewall_del_bulk_file_path_string,
- NULL,
- },
-};
+ if (parser_read_uint16(&dport1, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "dport1");
+ return;
+ }
-/*
- * p firewall add default
- */
-struct cmd_firewall_add_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t default_string;
- uint8_t port_id;
-};
+ if (parser_read_uint8(&proto, tokens[10])) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-static void
-cmd_firewall_add_default_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_add_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ if (parser_read_uint8_hex(&protomask, tokens[11])) {
+ printf(CMD_MSG_INVALID_ARG, "protomask");
+ return;
+ }
- status = app_pipeline_firewall_add_default_rule(app,
- params->pipeline_id,
- params->port_id);
+ key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.src_ip = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.src_ip_mask = sipdepth;
+ key.key.ipv4_5tuple.dst_ip = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ key.key.ipv4_5tuple.src_port_from = sport0;
+ key.key.ipv4_5tuple.src_port_to = sport1;
+ key.key.ipv4_5tuple.dst_port_from = dport0;
+ key.key.ipv4_5tuple.dst_port_to = dport1;
+ key.key.ipv4_5tuple.proto = proto;
+ key.key.ipv4_5tuple.proto_mask = protomask;
+
+ status = app_pipeline_firewall_delete_rule(app,
+ params->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-cmdline_parse_token_string_t cmd_firewall_add_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_firewall_add_default_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_default_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_firewall_add_default_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
- firewall_string, "firewall");
-
-cmdline_parse_token_string_t cmd_firewall_add_default_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
- add_string, "add");
+ } /* firewall del */
+
+ /* firewall del bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_firewall_key *keys;
+ uint32_t *priorities, *port_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del bulk");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_add_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_add_default_result,
- default_string, "default");
+ filename = tokens[2];
-cmdline_parse_token_num_t cmd_firewall_add_default_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_add_default_result, port_id,
- UINT8);
+ n_keys = APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_firewall_key));
+ if (keys == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
+ memset(keys, 0, n_keys * sizeof(struct pipeline_firewall_key));
-cmdline_parse_inst_t cmd_firewall_add_default = {
- .f = cmd_firewall_add_default_parsed,
- .data = NULL,
- .help_str = "Firewall default rule add",
- .tokens = {
- (void *) &cmd_firewall_add_default_p_string,
- (void *) &cmd_firewall_add_default_pipeline_id,
- (void *) &cmd_firewall_add_default_firewall_string,
- (void *) &cmd_firewall_add_default_add_string,
- (void *) &cmd_firewall_add_default_default_string,
- (void *) &cmd_firewall_add_default_port_id,
- NULL,
- },
-};
+ priorities = malloc(n_keys * sizeof(uint32_t));
+ if (priorities == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(keys);
+ return;
+ }
-/*
- * p firewall del default
- */
-struct cmd_firewall_del_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t default_string;
-};
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(priorities);
+ free(keys);
+ return;
+ }
-static void
-cmd_firewall_del_default_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_del_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ status = app_pipeline_firewall_load_file(filename,
+ keys,
+ priorities,
+ port_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(port_ids);
+ free(priorities);
+ free(keys);
+ return;
+ }
- status = app_pipeline_firewall_delete_default_rule(app,
- params->pipeline_id);
+ status = app_pipeline_firewall_delete_bulk(app,
+ params->pipeline_id,
+ keys,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del bulk");
- if (status != 0) {
- printf("Command failed\n");
+ free(port_ids);
+ free(priorities);
+ free(keys);
return;
- }
-}
-
-cmdline_parse_token_string_t cmd_firewall_del_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_firewall_del_default_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_del_default_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_firewall_del_default_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
- firewall_string, "firewall");
-
-cmdline_parse_token_string_t cmd_firewall_del_default_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
- del_string, "del");
+ } /* firewall del bulk */
+
+ /* firewall del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del default");
+ return;
+ }
-cmdline_parse_token_string_t cmd_firewall_del_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_del_default_result,
- default_string, "default");
+ status = app_pipeline_firewall_delete_default_rule(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del default");
-cmdline_parse_inst_t cmd_firewall_del_default = {
- .f = cmd_firewall_del_default_parsed,
- .data = NULL,
- .help_str = "Firewall default rule delete",
- .tokens = {
- (void *) &cmd_firewall_del_default_p_string,
- (void *) &cmd_firewall_del_default_pipeline_id,
- (void *) &cmd_firewall_del_default_firewall_string,
- (void *) &cmd_firewall_del_default_del_string,
- (void *) &cmd_firewall_del_default_default_string,
- NULL,
- },
-};
+ return;
-/*
- * p firewall ls
- */
+ } /* firewall del default */
-struct cmd_firewall_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_fixed_string_t ls_string;
-};
-
-static void
-cmd_firewall_ls_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ /* firewall ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall ls");
+ return;
+ }
- status = app_pipeline_firewall_ls(app, params->pipeline_id);
+ status = app_pipeline_firewall_ls(app, params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall ls");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
+ } /* firewall ls */
+
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall");
}
-cmdline_parse_token_string_t cmd_firewall_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result, p_string,
- "p");
+static cmdline_parse_token_string_t cmd_firewall_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, p_string, "p");
-cmdline_parse_token_num_t cmd_firewall_ls_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_ls_result, pipeline_id,
- UINT32);
+static cmdline_parse_token_num_t cmd_firewall_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_result, pipeline_id, UINT32);
-cmdline_parse_token_string_t cmd_firewall_ls_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result,
- firewall_string, "firewall");
+static cmdline_parse_token_string_t cmd_firewall_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, firewall_string,
+ "firewall");
-cmdline_parse_token_string_t cmd_firewall_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_firewall_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_inst_t cmd_firewall_ls = {
- .f = cmd_firewall_ls_parsed,
+static cmdline_parse_inst_t cmd_firewall = {
+ .f = cmd_firewall_parsed,
.data = NULL,
- .help_str = "Firewall rule list",
+ .help_str = "firewall add / add bulk / add default / del / del bulk"
+ " / del default / ls",
.tokens = {
- (void *) &cmd_firewall_ls_p_string,
- (void *) &cmd_firewall_ls_pipeline_id,
- (void *) &cmd_firewall_ls_firewall_string,
- (void *) &cmd_firewall_ls_ls_string,
+ (void *) &cmd_firewall_p_string,
+ (void *) &cmd_firewall_pipeline_id,
+ (void *) &cmd_firewall_firewall_string,
+ (void *) &cmd_firewall_multi_string,
NULL,
},
};
static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_firewall_add_ipv4,
- (cmdline_parse_inst_t *) &cmd_firewall_del_ipv4,
- (cmdline_parse_inst_t *) &cmd_firewall_add_bulk,
- (cmdline_parse_inst_t *) &cmd_firewall_del_bulk,
- (cmdline_parse_inst_t *) &cmd_firewall_add_default,
- (cmdline_parse_inst_t *) &cmd_firewall_del_default,
- (cmdline_parse_inst_t *) &cmd_firewall_ls,
+ (cmdline_parse_inst_t *) &cmd_firewall,
NULL,
};
static struct pipeline_fe_ops pipeline_firewall_fe_ops = {
.f_init = app_pipeline_firewall_init,
+ .f_post_init = NULL,
.f_free = app_pipeline_firewall_free,
+ .f_track = app_pipeline_track_default,
.cmds = pipeline_cmds,
};
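
The hunk above collapses the seven per-subcommand firewall commands into a single "p <pipeline ID> firewall ..." entry point: the trailing TOKEN_STRING_MULTI token captures the rest of the command line, and the handler tokenizes it and dispatches on sub-keywords. A minimal self-contained sketch of that dispatch pattern in plain C (hypothetical names; not the DPDK cmdline API):

#include <stdio.h>
#include <string.h>

/* Split a writable command tail into whitespace-separated tokens. */
static int
tokenize(char *s, char **tokens, unsigned int max)
{
	unsigned int n = 0;
	char *tok = strtok(s, " \t\n");

	while (tok != NULL) {
		if (n == max)
			return -1; /* too many arguments */
		tokens[n++] = tok;
		tok = strtok(NULL, " \t\n");
	}
	return (int)n;
}

static void
firewall_dispatch(char *tail)
{
	char *tokens[16];
	int n = tokenize(tail, tokens, 16);

	if (n < 0) {
		printf("too many arguments\n");
		return;
	}
	if ((n >= 1) && (strcmp(tokens[0], "ls") == 0)) {
		printf("-> list rules\n"); /* would call the ls handler */
		return;
	}
	if ((n >= 2) && (strcmp(tokens[0], "del") == 0) &&
	    (strcmp(tokens[1], "default") == 0)) {
		printf("-> delete default rule\n");
		return;
	}
	printf("unknown firewall subcommand\n");
}

int
main(void)
{
	char cmd1[] = "ls";
	char cmd2[] = "del default";

	firewall_dispatch(cmd1);
	firewall_dispatch(cmd2);
	return 0;
}
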
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall.h b/examples/ip_pipeline/pipeline/pipeline_firewall.h
index ccc4e64b..aa79a2a0 100644
--- a/examples/ip_pipeline/pipeline/pipeline_firewall.h
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall.h
@@ -72,6 +72,18 @@ int
app_pipeline_firewall_delete_default_rule(struct app_params *app,
uint32_t pipeline_id);
+#ifndef APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE
+#define APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE 65536
+#endif
+
+int
+app_pipeline_firewall_load_file(char *filename,
+ struct pipeline_firewall_key *keys,
+ uint32_t *priorities,
+ uint32_t *port_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
extern struct pipeline_type pipeline_firewall;
#endif
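
Like the flow-actions and flow-classification loaders added later in this patch, the new prototype uses an in/out *n_keys (buffer capacity on entry, records parsed on return, capped here by APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE) and reports the offending input line through *line on failure. A self-contained sketch of that calling convention, simplified to integer records (names hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static int
load_ints(const char *filename, uint32_t *vals, uint32_t *n_vals,
	uint32_t *line)
{
	FILE *f;
	char buf[1024];
	uint32_t i, l;

	/* Check input arguments */
	if ((filename == NULL) || (vals == NULL) ||
	    (n_vals == NULL) || (*n_vals == 0) || (line == NULL)) {
		if (line)
			*line = 0;
		return -1;
	}

	f = fopen(filename, "r");
	if (f == NULL) {
		*line = 0;
		return -1;
	}

	for (i = 0, l = 1; i < *n_vals; l++) {
		if (fgets(buf, sizeof(buf), f) == NULL)
			break;
		if ((buf[0] == '#') || (buf[0] == '\n'))
			continue;
		if (sscanf(buf, "%" SCNu32, &vals[i]) != 1) {
			*line = l; /* report the offending line */
			fclose(f);
			return -1;
		}
		i++;
	}

	*n_vals = i; /* capacity in, record count out */
	fclose(f);
	return 0;
}
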
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
index e7a8a4c5..b61f3034 100644
--- a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
@@ -565,27 +565,6 @@ pipeline_firewall_free(void *pipeline)
}
static int
-pipeline_firewall_track(void *pipeline,
- __rte_unused uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_in == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-static int
pipeline_firewall_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
@@ -732,7 +711,7 @@ pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
n_keys = req->n_keys;
for (i = 0; i < n_keys; i++) {
- entries[i] = rte_malloc(NULL,
+ entries[i] = rte_zmalloc(NULL,
sizeof(struct firewall_table_entry),
RTE_CACHE_LINE_SIZE);
if (entries[i] == NULL) {
@@ -740,7 +719,7 @@ pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
return rsp;
}
- params[i] = rte_malloc(NULL,
+ params[i] = rte_zmalloc(NULL,
sizeof(struct rte_table_acl_rule_add_params),
RTE_CACHE_LINE_SIZE);
if (params[i] == NULL) {
@@ -814,7 +793,7 @@ pipeline_firewall_msg_req_del_bulk_handler(struct pipeline *p, void *msg)
n_keys = req->n_keys;
for (i = 0; i < n_keys; i++) {
- params[i] = rte_malloc(NULL,
+ params[i] = rte_zmalloc(NULL,
sizeof(struct rte_table_acl_rule_delete_params),
RTE_CACHE_LINE_SIZE);
if (params[i] == NULL) {
@@ -903,5 +882,4 @@ struct pipeline_be_ops pipeline_firewall_be_ops = {
.f_free = pipeline_firewall_free,
.f_run = NULL,
.f_timer = pipeline_firewall_timer,
- .f_track = pipeline_firewall_track,
};
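
The rte_malloc() to rte_zmalloc() changes in this hunk make the bulk add/delete parameter blocks start out zero-filled, so any field a handler leaves untouched reads back as 0 rather than heap garbage. A plain-C sketch of the same defensive choice (the record type is hypothetical):

#include <stdlib.h>

struct rule_entry {
	int priority;
	int port_id;
	unsigned char pad[48]; /* fields the caller may never touch */
};

int
main(void)
{
	/* calloc() plays the role of rte_zmalloc(): zeroed allocation. */
	struct rule_entry *e = calloc(1, sizeof(*e));

	if (e == NULL)
		return 1;
	e->priority = 7; /* only some fields are set; the rest stay 0 */
	free(e);
	return 0;
}
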
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions.c b/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
index 4012121f..bf12fd7b 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
@@ -35,6 +35,7 @@
#include <string.h>
#include <sys/queue.h>
#include <netinet/in.h>
+#include <unistd.h>
#include <rte_common.h>
#include <rte_hexdump.h>
@@ -43,13 +44,12 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
#include "app.h"
#include "pipeline_common_fe.h"
#include "pipeline_flow_actions.h"
#include "hash_func.h"
+#include "parser.h"
/*
* Flow actions pipeline
@@ -689,1121 +689,620 @@ app_pipeline_fa_dscp_ls(struct app_params *app,
return 0;
}
-/*
- * Flow meter configuration (single flow)
- *
- * p <pipeline ID> flow <flow ID> meter <meter ID> trtcm <trtcm params>
- */
-
-struct cmd_fa_meter_config_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- uint32_t flow_id;
- cmdline_fixed_string_t meter_string;
- uint32_t meter_id;
- cmdline_fixed_string_t trtcm_string;
- uint64_t cir;
- uint64_t pir;
- uint64_t cbs;
- uint64_t pbs;
-};
-
-static void
-cmd_fa_meter_config_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_meter_config_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params flow_params;
- int status;
-
- if (params->meter_id >= PIPELINE_FA_N_TC_MAX) {
- printf("Command failed\n");
- return;
- }
-
- flow_params.m[params->meter_id].cir = params->cir;
- flow_params.m[params->meter_id].pir = params->pir;
- flow_params.m[params->meter_id].cbs = params->cbs;
- flow_params.m[params->meter_id].pbs = params->pbs;
-
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- params->flow_id,
- 1 << params->meter_id,
- 0,
- 0,
- &flow_params);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fa_meter_config_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
- flow_string, "flow");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
- flow_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_meter_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
- meter_string, "meter");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_meter_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result,
- meter_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_trtcm_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_result,
- trtcm_string, "trtcm");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_cir =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, cir, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_pir =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, pir, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_cbs =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, cbs, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_pbs =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_result, pbs, UINT64);
-
-cmdline_parse_inst_t cmd_fa_meter_config = {
- .f = cmd_fa_meter_config_parsed,
- .data = NULL,
- .help_str = "Flow meter configuration (single flow) ",
- .tokens = {
- (void *) &cmd_fa_meter_config_p_string,
- (void *) &cmd_fa_meter_config_pipeline_id,
- (void *) &cmd_fa_meter_config_flow_string,
- (void *) &cmd_fa_meter_config_flow_id,
- (void *) &cmd_fa_meter_config_meter_string,
- (void *) &cmd_fa_meter_config_meter_id,
- (void *) &cmd_fa_meter_config_trtcm_string,
- (void *) &cmd_fa_meter_config_cir,
- (void *) &cmd_fa_meter_config_pir,
- (void *) &cmd_fa_meter_config_cbs,
- (void *) &cmd_fa_meter_config_pbs,
- NULL,
- },
-};
-
-/*
- * Flow meter configuration (multiple flows)
- *
- * p <pipeline ID> flows <n_flows> meter <meter ID> trtcm <trtcm params>
- */
-
-struct cmd_fa_meter_config_bulk_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flows_string;
- uint32_t n_flows;
- cmdline_fixed_string_t meter_string;
- uint32_t meter_id;
- cmdline_fixed_string_t trtcm_string;
- uint64_t cir;
- uint64_t pir;
- uint64_t cbs;
- uint64_t pbs;
-};
-
-static void
-cmd_fa_meter_config_bulk_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
+int
+app_pipeline_fa_load_file(char *filename,
+ uint32_t *flow_ids,
+ struct pipeline_fa_flow_params *p,
+ uint32_t *n_flows,
+ uint32_t *line)
{
- struct cmd_fa_meter_config_bulk_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params flow_template, *flow_params;
- uint32_t *flow_id;
- uint32_t i;
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
- if ((params->n_flows == 0) ||
- (params->meter_id >= PIPELINE_FA_N_TC_MAX)) {
- printf("Invalid arguments\n");
- return;
- }
-
- flow_id = (uint32_t *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (flow_ids == NULL) ||
+ (p == NULL) ||
+ (n_flows == NULL) ||
+ (*n_flows == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
- flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
- RTE_CACHE_LINE_SIZE);
- if (flow_params == NULL) {
- rte_free(flow_id);
- printf("Memory allocation failed\n");
- return;
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
}
- memset(&flow_template, 0, sizeof(flow_template));
- flow_template.m[params->meter_id].cir = params->cir;
- flow_template.m[params->meter_id].pir = params->pir;
- flow_template.m[params->meter_id].cbs = params->cbs;
- flow_template.m[params->meter_id].pbs = params->pbs;
+ /* Read file */
+ for (i = 0, l = 1; i < *n_flows; l++) {
+ char *tokens[64];
+ uint32_t n_tokens = RTE_DIM(tokens);
- for (i = 0; i < params->n_flows; i++) {
- uint32_t pos = i % N_FLOWS_BULK;
+ int status;
- flow_id[pos] = i;
- memcpy(&flow_params[pos],
- &flow_template,
- sizeof(flow_template));
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
- if ((pos == N_FLOWS_BULK - 1) ||
- (i == params->n_flows - 1)) {
- int status;
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
- status = app_pipeline_fa_flow_config_bulk(app,
- params->pipeline_id,
- flow_id,
- pos + 1,
- 1 << params->meter_id,
- 0,
- 0,
- flow_params);
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
- if (status != 0) {
- printf("Command failed\n");
- break;
- }
- }
+ if ((n_tokens != 64) ||
+ /* flow */
+ strcmp(tokens[0], "flow") ||
+ parser_read_uint32(&flow_ids[i], tokens[1]) ||
+
+ /* meter & policer 0 */
+ strcmp(tokens[2], "meter") ||
+ strcmp(tokens[3], "0") ||
+ strcmp(tokens[4], "trtcm") ||
+ parser_read_uint64(&p[i].m[0].cir, tokens[5]) ||
+ parser_read_uint64(&p[i].m[0].pir, tokens[6]) ||
+ parser_read_uint64(&p[i].m[0].cbs, tokens[7]) ||
+ parser_read_uint64(&p[i].m[0].pbs, tokens[8]) ||
+ strcmp(tokens[9], "policer") ||
+ strcmp(tokens[10], "0") ||
+ strcmp(tokens[11], "g") ||
+ string_to_policer_action(tokens[12],
+ &p[i].p[0].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[13], "y") ||
+ string_to_policer_action(tokens[14],
+ &p[i].p[0].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[15], "r") ||
+ string_to_policer_action(tokens[16],
+ &p[i].p[0].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 1 */
+ strcmp(tokens[17], "meter") ||
+ strcmp(tokens[18], "1") ||
+ strcmp(tokens[19], "trtcm") ||
+ parser_read_uint64(&p[i].m[1].cir, tokens[20]) ||
+ parser_read_uint64(&p[i].m[1].pir, tokens[21]) ||
+ parser_read_uint64(&p[i].m[1].cbs, tokens[22]) ||
+ parser_read_uint64(&p[i].m[1].pbs, tokens[23]) ||
+ strcmp(tokens[24], "policer") ||
+ strcmp(tokens[25], "1") ||
+ strcmp(tokens[26], "g") ||
+ string_to_policer_action(tokens[27],
+ &p[i].p[1].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[28], "y") ||
+ string_to_policer_action(tokens[29],
+ &p[i].p[1].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[30], "r") ||
+ string_to_policer_action(tokens[31],
+ &p[i].p[1].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 2 */
+ strcmp(tokens[32], "meter") ||
+ strcmp(tokens[33], "2") ||
+ strcmp(tokens[34], "trtcm") ||
+ parser_read_uint64(&p[i].m[2].cir, tokens[35]) ||
+ parser_read_uint64(&p[i].m[2].pir, tokens[36]) ||
+ parser_read_uint64(&p[i].m[2].cbs, tokens[37]) ||
+ parser_read_uint64(&p[i].m[2].pbs, tokens[38]) ||
+ strcmp(tokens[39], "policer") ||
+ strcmp(tokens[40], "2") ||
+ strcmp(tokens[41], "g") ||
+ string_to_policer_action(tokens[42],
+ &p[i].p[2].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[43], "y") ||
+ string_to_policer_action(tokens[44],
+ &p[i].p[2].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[45], "r") ||
+ string_to_policer_action(tokens[46],
+ &p[i].p[2].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 3 */
+ strcmp(tokens[47], "meter") ||
+ strcmp(tokens[48], "3") ||
+ strcmp(tokens[49], "trtcm") ||
+ parser_read_uint64(&p[i].m[3].cir, tokens[50]) ||
+ parser_read_uint64(&p[i].m[3].pir, tokens[51]) ||
+ parser_read_uint64(&p[i].m[3].cbs, tokens[52]) ||
+ parser_read_uint64(&p[i].m[3].pbs, tokens[53]) ||
+ strcmp(tokens[54], "policer") ||
+ strcmp(tokens[55], "3") ||
+ strcmp(tokens[56], "g") ||
+ string_to_policer_action(tokens[57],
+ &p[i].p[3].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[58], "y") ||
+ string_to_policer_action(tokens[59],
+ &p[i].p[3].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[60], "r") ||
+ string_to_policer_action(tokens[61],
+ &p[i].p[3].action[e_RTE_METER_RED]) ||
+
+ /* port */
+ strcmp(tokens[62], "port") ||
+ parser_read_uint32(&p[i].port_id, tokens[63]))
+ goto error1;
+
+ i++;
}
- rte_free(flow_params);
- rte_free(flow_id);
+ /* Close file */
+ *n_flows = i;
+ fclose(f);
+ return 0;
+error1:
+ *line = l;
+ fclose(f);
+ return -1;
}
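
With the strict check above, every non-comment record in the bulk file must supply exactly 64 whitespace-separated tokens: the flow id, four meter/policer pairs, and the output port. An assumed record (values hypothetical; action letters G, Y, R, D as documented in the command comment below; shown wrapped for readability, though each record occupies a single line in the file):

flow 0 meter 0 trtcm 1250000000 1250000000 100000 100000 policer 0 g G y Y r R
  meter 1 trtcm 1250000000 1250000000 100000 100000 policer 1 g G y Y r R
  meter 2 trtcm 1250000000 1250000000 100000 100000 policer 2 g G y Y r R
  meter 3 trtcm 1250000000 1250000000 100000 100000 policer 3 g G y Y r R
  port 0
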
-cmdline_parse_token_string_t cmd_fa_meter_config_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_bulk_flows_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- flows_string, "flows");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- n_flows, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_bulk_meter_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- meter_string, "meter");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_meter_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- meter_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_meter_config_bulk_trtcm_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- trtcm_string, "trtcm");
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_cir =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- cir, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pir =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- pir, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_cbs =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- cbs, UINT64);
-
-cmdline_parse_token_num_t cmd_fa_meter_config_bulk_pbs =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_meter_config_bulk_result,
- pbs, UINT64);
-
-cmdline_parse_inst_t cmd_fa_meter_config_bulk = {
- .f = cmd_fa_meter_config_bulk_parsed,
- .data = NULL,
- .help_str = "Flow meter configuration (multiple flows)",
- .tokens = {
- (void *) &cmd_fa_meter_config_bulk_p_string,
- (void *) &cmd_fa_meter_config_bulk_pipeline_id,
- (void *) &cmd_fa_meter_config_bulk_flows_string,
- (void *) &cmd_fa_meter_config_bulk_n_flows,
- (void *) &cmd_fa_meter_config_bulk_meter_string,
- (void *) &cmd_fa_meter_config_bulk_meter_id,
- (void *) &cmd_fa_meter_config_bulk_trtcm_string,
- (void *) &cmd_fa_meter_config_cir,
- (void *) &cmd_fa_meter_config_pir,
- (void *) &cmd_fa_meter_config_cbs,
- (void *) &cmd_fa_meter_config_pbs,
- NULL,
- },
-};
-
/*
- * Flow policer configuration (single flow)
+ * action
*
- * p <pipeline ID> flow <flow ID> policer <policer ID>
- * G <action> Y <action> R <action>
+ * flow meter, policer and output port configuration:
+ * p <pipelineid> action flow <flowid> meter <meterid> trtcm <cir> <pir> <cbs> <pbs>
*
- * <action> = G (green) | Y (yellow) | R (red) | D (drop)
- */
-
-struct cmd_fa_policer_config_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- uint32_t flow_id;
- cmdline_fixed_string_t policer_string;
- uint32_t policer_id;
- cmdline_fixed_string_t green_string;
- cmdline_fixed_string_t g_action;
- cmdline_fixed_string_t yellow_string;
- cmdline_fixed_string_t y_action;
- cmdline_fixed_string_t red_string;
- cmdline_fixed_string_t r_action;
-};
-
-static void
-cmd_fa_policer_config_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_policer_config_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params flow_params;
- int status;
-
- if (params->policer_id >= PIPELINE_FA_N_TC_MAX) {
- printf("Command failed\n");
- return;
- }
-
- status = string_to_policer_action(params->g_action,
- &flow_params.p[params->policer_id].action[e_RTE_METER_GREEN]);
- if (status)
- printf("Invalid policer green action\n");
-
- status = string_to_policer_action(params->y_action,
- &flow_params.p[params->policer_id].action[e_RTE_METER_YELLOW]);
- if (status)
- printf("Invalid policer yellow action\n");
-
- status = string_to_policer_action(params->r_action,
- &flow_params.p[params->policer_id].action[e_RTE_METER_RED]);
- if (status)
- printf("Invalid policer red action\n");
-
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- params->flow_id,
- 0,
- 1 << params->policer_id,
- 0,
- &flow_params);
-
- if (status != 0)
- printf("Command failed\n");
-
-}
-
-cmdline_parse_token_string_t cmd_fa_policer_config_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- flow_string, "flow");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
- flow_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_policer_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- policer_string, "policer");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_policer_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_result,
- policer_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_green_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- green_string, "G");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_g_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- g_action, "R#Y#G#D");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_yellow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- yellow_string, "Y");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_y_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- y_action, "R#Y#G#D");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_red_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- red_string, "R");
-
-cmdline_parse_token_string_t cmd_fa_policer_config_r_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_result,
- r_action, "R#Y#G#D");
-
-cmdline_parse_inst_t cmd_fa_policer_config = {
- .f = cmd_fa_policer_config_parsed,
- .data = NULL,
- .help_str = "Flow policer configuration (single flow)",
- .tokens = {
- (void *) &cmd_fa_policer_config_p_string,
- (void *) &cmd_fa_policer_config_pipeline_id,
- (void *) &cmd_fa_policer_config_flow_string,
- (void *) &cmd_fa_policer_config_flow_id,
- (void *) &cmd_fa_policer_config_policer_string,
- (void *) &cmd_fa_policer_config_policer_id,
- (void *) &cmd_fa_policer_config_green_string,
- (void *) &cmd_fa_policer_config_g_action,
- (void *) &cmd_fa_policer_config_yellow_string,
- (void *) &cmd_fa_policer_config_y_action,
- (void *) &cmd_fa_policer_config_red_string,
- (void *) &cmd_fa_policer_config_r_action,
- NULL,
- },
-};
-
-/*
- * Flow policer configuration (multiple flows)
+ * p <pipelineid> action flow <flowid> policer <policerid> g <gaction> y <yaction> r <raction>
+ * <gaction>, <yaction> and <raction> are each one of the following:
+ *	G = recolor to green
+ *	Y = recolor to yellow
+ *	R = recolor to red
+ * D = drop
*
- * p <pipeline ID> flows <n_flows> policer <policer ID>
- * G <action> Y <action> R <action>
+ * p <pipelineid> action flow <flowid> port <portid>
*
- * <action> = G (green) | Y (yellow) | R (red) | D (drop)
- */
+ * p <pipelineid> action flow bulk <file>
+ *
+ * flow policer stats read:
+ * p <pipelineid> action flow <flowid> stats
+ *
+ * flow ls:
+ * p <pipelineid> action flow ls
+ *
+ * dscp table configuration:
+ * p <pipelineid> action dscp <dscpid> class <classid> color <color>
+ *
+ * dscp table ls:
+ * p <pipelineid> action dscp ls
+ */
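
For illustration, assumed invocations matching the grammar above (pipeline 1, flow 0; all numeric values and the file path hypothetical):

p 1 action flow 0 meter 0 trtcm 1250000000 1250000000 100000 100000
p 1 action flow 0 policer 0 g G y Y r D
p 1 action flow 0 port 2
p 1 action flow 0 stats
p 1 action flow bulk ./flow_actions.txt
p 1 action flow ls
p 1 action dscp 0 class 0 color G
p 1 action dscp ls
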
-struct cmd_fa_policer_config_bulk_result {
+struct cmd_action_result {
cmdline_fixed_string_t p_string;
uint32_t pipeline_id;
- cmdline_fixed_string_t flows_string;
- uint32_t n_flows;
- cmdline_fixed_string_t policer_string;
- uint32_t policer_id;
- cmdline_fixed_string_t green_string;
- cmdline_fixed_string_t g_action;
- cmdline_fixed_string_t yellow_string;
- cmdline_fixed_string_t y_action;
- cmdline_fixed_string_t red_string;
- cmdline_fixed_string_t r_action;
+ cmdline_fixed_string_t action_string;
+ cmdline_multi_string_t multi_string;
};
static void
-cmd_fa_policer_config_bulk_parsed(
+cmd_action_parsed(
void *parsed_result,
__rte_unused struct cmdline *cl,
void *data)
{
- struct cmd_fa_policer_config_bulk_result *params = parsed_result;
+ struct cmd_action_result *params = parsed_result;
struct app_params *app = data;
- struct pipeline_fa_flow_params flow_template, *flow_params;
- uint32_t *flow_id, i;
- int status;
- if ((params->n_flows == 0) ||
- (params->policer_id >= PIPELINE_FA_N_TC_MAX)) {
- printf("Invalid arguments\n");
- return;
- }
-
- flow_id = (uint32_t *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
- flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
- RTE_CACHE_LINE_SIZE);
- if (flow_params == NULL) {
- rte_free(flow_id);
- printf("Memory allocation failed\n");
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status != 0) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "action");
return;
}
- memset(&flow_template, 0, sizeof(flow_template));
-
- status = string_to_policer_action(params->g_action,
- &flow_template.p[params->policer_id].action[e_RTE_METER_GREEN]);
- if (status)
- printf("Invalid policer green action\n");
-
- status = string_to_policer_action(params->y_action,
- &flow_template.p[params->policer_id].action[e_RTE_METER_YELLOW]);
- if (status)
- printf("Invalid policer yellow action\n");
-
- status = string_to_policer_action(params->r_action,
- &flow_template.p[params->policer_id].action[e_RTE_METER_RED]);
- if (status)
- printf("Invalid policer red action\n");
-
- for (i = 0; i < params->n_flows; i++) {
- uint32_t pos = i % N_FLOWS_BULK;
-
- flow_id[pos] = i;
- memcpy(&flow_params[pos], &flow_template,
- sizeof(flow_template));
-
- if ((pos == N_FLOWS_BULK - 1) ||
- (i == params->n_flows - 1)) {
- int status;
-
- status = app_pipeline_fa_flow_config_bulk(app,
- params->pipeline_id,
- flow_id,
- pos + 1,
- 0,
- 1 << params->policer_id,
- 0,
- flow_params);
-
- if (status != 0) {
- printf("Command failed\n");
-
- break;
- }
+ /* action flow meter */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "meter") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, meter_id;
+
+ if (n_tokens != 9) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow meter");
+ return;
}
- }
-
- rte_free(flow_params);
- rte_free(flow_id);
-
-}
-
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_flows_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- flows_string, "flows");
-
-cmdline_parse_token_num_t cmd_fa_policer_config_bulk_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- n_flows, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_policer_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- policer_string, "policer");
-cmdline_parse_token_num_t cmd_fa_policer_config_bulk_policer_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- policer_id, UINT32);
+ memset(&flow_params, 0, sizeof(flow_params));
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_green_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- green_string, "G");
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_g_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- g_action, "R#Y#G#D");
+ if (parser_read_uint32(&meter_id, tokens[3]) ||
+ (meter_id >= PIPELINE_FA_N_TC_MAX)) {
+ printf(CMD_MSG_INVALID_ARG, "meterid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_yellow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- yellow_string, "Y");
+ if (strcmp(tokens[4], "trtcm")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "trtcm");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_y_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- y_action, "R#Y#G#D");
+ if (parser_read_uint64(&flow_params.m[meter_id].cir, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "cir");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_red_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- red_string, "R");
+ if (parser_read_uint64(&flow_params.m[meter_id].pir, tokens[6])) {
+ printf(CMD_MSG_INVALID_ARG, "pir");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_policer_config_bulk_r_action =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_config_bulk_result,
- r_action, "R#Y#G#D");
+ if (parser_read_uint64(&flow_params.m[meter_id].cbs, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "cbs");
+ return;
+ }
-cmdline_parse_inst_t cmd_fa_policer_config_bulk = {
- .f = cmd_fa_policer_config_bulk_parsed,
- .data = NULL,
- .help_str = "Flow policer configuration (multiple flows)",
- .tokens = {
- (void *) &cmd_fa_policer_config_bulk_p_string,
- (void *) &cmd_fa_policer_config_bulk_pipeline_id,
- (void *) &cmd_fa_policer_config_bulk_flows_string,
- (void *) &cmd_fa_policer_config_bulk_n_flows,
- (void *) &cmd_fa_policer_config_bulk_policer_string,
- (void *) &cmd_fa_policer_config_bulk_policer_id,
- (void *) &cmd_fa_policer_config_bulk_green_string,
- (void *) &cmd_fa_policer_config_bulk_g_action,
- (void *) &cmd_fa_policer_config_bulk_yellow_string,
- (void *) &cmd_fa_policer_config_bulk_y_action,
- (void *) &cmd_fa_policer_config_bulk_red_string,
- (void *) &cmd_fa_policer_config_bulk_r_action,
- NULL,
- },
-};
+ if (parser_read_uint64(&flow_params.m[meter_id].pbs, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "pbs");
+ return;
+ }
-/*
- * Flow output port configuration (single flow)
- *
- * p <pipeline ID> flow <flow ID> port <port ID>
- */
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 1 << meter_id,
+ 0,
+ 0,
+ &flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow meter");
-struct cmd_fa_output_port_config_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- uint32_t flow_id;
- cmdline_fixed_string_t port_string;
- uint32_t port_id;
-};
+ return;
+ } /* action flow meter */
+
+ /* action flow policer */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "policer") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, policer_id;
+
+ if (n_tokens != 10) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow policer");
+ return;
+ }
-static void
-cmd_fa_output_port_config_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_output_port_config_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params flow_params;
- int status;
+ memset(&flow_params, 0, sizeof(flow_params));
- flow_params.port_id = params->port_id;
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- params->flow_id,
- 0,
- 0,
- 1,
- &flow_params);
+ if (parser_read_uint32(&policer_id, tokens[3]) ||
+ (policer_id >= PIPELINE_FA_N_TC_MAX)) {
+ printf(CMD_MSG_INVALID_ARG, "policerid");
+ return;
+ }
- if (status != 0)
- printf("Command failed\n");
-}
+ if (strcmp(tokens[4], "g")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "g");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_output_port_config_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
- p_string, "p");
+ if (string_to_policer_action(tokens[5],
+ &flow_params.p[policer_id].action[e_RTE_METER_GREEN])) {
+ printf(CMD_MSG_INVALID_ARG, "gaction");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fa_output_port_config_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
- pipeline_id, UINT32);
+ if (strcmp(tokens[6], "y")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "y");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_output_port_config_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
- flow_string, "flow");
+ if (string_to_policer_action(tokens[7],
+ &flow_params.p[policer_id].action[e_RTE_METER_YELLOW])) {
+ printf(CMD_MSG_INVALID_ARG, "yaction");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fa_output_port_config_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
- flow_id, UINT32);
+ if (strcmp(tokens[8], "r")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "r");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_output_port_config_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_result,
- port_string, "port");
+ if (string_to_policer_action(tokens[9],
+ &flow_params.p[policer_id].action[e_RTE_METER_RED])) {
+ printf(CMD_MSG_INVALID_ARG, "raction");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fa_output_port_config_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_result,
- port_id, UINT32);
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 0,
+ 1 << policer_id,
+ 0,
+ &flow_params);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "action flow policer");
-cmdline_parse_inst_t cmd_fa_output_port_config = {
- .f = cmd_fa_output_port_config_parsed,
- .data = NULL,
- .help_str = "Flow output port configuration (single flow)",
- .tokens = {
- (void *) &cmd_fa_output_port_config_p_string,
- (void *) &cmd_fa_output_port_config_pipeline_id,
- (void *) &cmd_fa_output_port_config_flow_string,
- (void *) &cmd_fa_output_port_config_flow_id,
- (void *) &cmd_fa_output_port_config_port_string,
- (void *) &cmd_fa_output_port_config_port_id,
- NULL,
- },
-};
+ return;
+ } /* action flow policer */
+
+ /* action flow port */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "port") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, port_id;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow port");
+ return;
+ }
-/*
- * Flow output port configuration (multiple flows)
- *
- * p <pipeline ID> flows <n_flows> ports <n_ports>
- */
+ memset(&flow_params, 0, sizeof(flow_params));
-struct cmd_fa_output_port_config_bulk_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flows_string;
- uint32_t n_flows;
- cmdline_fixed_string_t ports_string;
- uint32_t n_ports;
-};
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-static void
-cmd_fa_output_port_config_bulk_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_output_port_config_bulk_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_flow_params *flow_params;
- uint32_t *flow_id;
- uint32_t i;
+ if (parser_read_uint32(&port_id, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
- if (params->n_flows == 0) {
- printf("Invalid arguments\n");
- return;
- }
+ flow_params.port_id = port_id;
- flow_id = (uint32_t *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 0,
+ 0,
+ 1,
+ &flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow port");
- flow_params = (struct pipeline_fa_flow_params *) rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(struct pipeline_fa_flow_params),
- RTE_CACHE_LINE_SIZE);
- if (flow_params == NULL) {
- rte_free(flow_id);
- printf("Memory allocation failed\n");
return;
- }
-
- for (i = 0; i < params->n_flows; i++) {
- uint32_t pos = i % N_FLOWS_BULK;
- uint32_t port_id = i % params->n_ports;
-
- flow_id[pos] = i;
-
- memset(&flow_params[pos], 0, sizeof(flow_params[pos]));
- flow_params[pos].port_id = port_id;
+ } /* action flow port */
+
+ /* action flow stats */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "stats") == 0)) {
+ struct pipeline_fa_policer_stats stats;
+ uint32_t flow_id, policer_id;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow stats");
+ return;
+ }
- if ((pos == N_FLOWS_BULK - 1) ||
- (i == params->n_flows - 1)) {
- int status;
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
- status = app_pipeline_fa_flow_config_bulk(app,
+ for (policer_id = 0;
+ policer_id < PIPELINE_FA_N_TC_MAX;
+ policer_id++) {
+ status = app_pipeline_fa_flow_policer_stats_read(app,
params->pipeline_id,
flow_id,
- pos + 1,
- 0,
- 0,
+ policer_id,
1,
- flow_params);
-
+ &stats);
if (status != 0) {
- printf("Command failed\n");
-
- break;
+ printf(CMD_MSG_FAIL, "action flow stats");
+ return;
}
- }
- }
-
- rte_free(flow_params);
- rte_free(flow_id);
-
-}
-
-cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_flows_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- flows_string, "flows");
-
-cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- n_flows, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_output_port_config_bulk_ports_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- ports_string, "ports");
-
-cmdline_parse_token_num_t cmd_fa_output_port_config_bulk_n_ports =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_output_port_config_bulk_result,
- n_ports, UINT32);
-
-cmdline_parse_inst_t cmd_fa_output_port_config_bulk = {
- .f = cmd_fa_output_port_config_bulk_parsed,
- .data = NULL,
- .help_str = "Flow output port configuration (multiple flows)",
- .tokens = {
- (void *) &cmd_fa_output_port_config_bulk_p_string,
- (void *) &cmd_fa_output_port_config_bulk_pipeline_id,
- (void *) &cmd_fa_output_port_config_bulk_flows_string,
- (void *) &cmd_fa_output_port_config_bulk_n_flows,
- (void *) &cmd_fa_output_port_config_bulk_ports_string,
- (void *) &cmd_fa_output_port_config_bulk_n_ports,
- NULL,
- },
-};
-
-/*
- * Flow DiffServ Code Point (DSCP) translation table configuration
- *
- * p <pipeline ID> dscp <DSCP ID> class <traffic class ID> color <color>
- *
- * <color> = G (green) | Y (yellow) | R (red)
-*/
-struct cmd_fa_dscp_config_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t dscp_string;
- uint32_t dscp_id;
- cmdline_fixed_string_t class_string;
- uint32_t traffic_class_id;
- cmdline_fixed_string_t color_string;
- cmdline_fixed_string_t color;
-
-};
-
-static void
-cmd_fa_dscp_config_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_dscp_config_result *params = parsed_result;
- struct app_params *app = data;
- enum rte_meter_color color;
- int status;
+ /* Display stats */
+ printf("\tPolicer: %" PRIu32
+ "\tPkts G: %" PRIu64
+ "\tPkts Y: %" PRIu64
+ "\tPkts R: %" PRIu64
+ "\tPkts D: %" PRIu64 "\n",
+ policer_id,
+ stats.n_pkts[e_RTE_METER_GREEN],
+ stats.n_pkts[e_RTE_METER_YELLOW],
+ stats.n_pkts[e_RTE_METER_RED],
+ stats.n_pkts_drop);
+ }
- status = string_to_color(params->color, &color);
- if (status) {
- printf("Invalid color\n");
return;
- }
-
- status = app_pipeline_fa_dscp_config(app,
- params->pipeline_id,
- params->dscp_id,
- params->traffic_class_id,
- color);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_dscp_config_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_dscp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- dscp_string, "dscp");
-
-cmdline_parse_token_num_t cmd_fa_dscp_config_dscp_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
- dscp_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_class_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- class_string, "class");
-
-cmdline_parse_token_num_t cmd_fa_dscp_config_traffic_class_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_config_result,
- traffic_class_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_color_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- color_string, "color");
-
-cmdline_parse_token_string_t cmd_fa_dscp_config_color =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_config_result,
- color, "G#Y#R");
+ } /* action flow stats */
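
The loop above reads and clears the counters for every policer of the flow, printing one line per traffic class. Assumed sample output for "p 1 action flow 0 stats" (counter values hypothetical, assuming PIPELINE_FA_N_TC_MAX is 4):

	Policer: 0	Pkts G: 1500	Pkts Y: 200	Pkts R: 0	Pkts D: 25
	Policer: 1	Pkts G: 0	Pkts Y: 0	Pkts R: 0	Pkts D: 0
	Policer: 2	Pkts G: 0	Pkts Y: 0	Pkts R: 0	Pkts D: 0
	Policer: 3	Pkts G: 0	Pkts Y: 0	Pkts R: 0	Pkts D: 0
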
+
+ /* action flow bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_fa_flow_params *flow_params;
+ uint32_t *flow_ids, n_flows, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow bulk");
+ return;
+ }
-cmdline_parse_inst_t cmd_fa_dscp_config = {
- .f = cmd_fa_dscp_config_parsed,
- .data = NULL,
- .help_str = "Flow DSCP translation table configuration",
- .tokens = {
- (void *) &cmd_fa_dscp_config_p_string,
- (void *) &cmd_fa_dscp_config_pipeline_id,
- (void *) &cmd_fa_dscp_config_dscp_string,
- (void *) &cmd_fa_dscp_config_dscp_id,
- (void *) &cmd_fa_dscp_config_class_string,
- (void *) &cmd_fa_dscp_config_traffic_class_id,
- (void *) &cmd_fa_dscp_config_color_string,
- (void *) &cmd_fa_dscp_config_color,
- NULL,
- },
-};
+ filename = tokens[2];
-/*
- * Flow policer stats read
- *
- * p <pipeline ID> flow <flow ID> policer <policer ID> stats
- */
+ n_flows = APP_PIPELINE_FA_MAX_RECORDS_IN_FILE;
+ flow_ids = malloc(n_flows * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
-struct cmd_fa_policer_stats_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- uint32_t flow_id;
- cmdline_fixed_string_t policer_string;
- uint32_t policer_id;
- cmdline_fixed_string_t stats_string;
-};
+ flow_params = malloc(n_flows * sizeof(struct pipeline_fa_flow_params));
+ if (flow_params == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(flow_ids);
+ return;
+ }
-static void
-cmd_fa_policer_stats_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_policer_stats_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fa_policer_stats stats;
- int status;
+ status = app_pipeline_fa_load_file(filename,
+ flow_ids,
+ flow_params,
+ &n_flows,
+ &line);
+ if (status) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_params);
+ free(flow_ids);
+ return;
+ }
- status = app_pipeline_fa_flow_policer_stats_read(app,
- params->pipeline_id,
- params->flow_id,
- params->policer_id,
- 1,
- &stats);
- if (status != 0) {
- printf("Command failed\n");
+ status = app_pipeline_fa_flow_config_bulk(app,
+ params->pipeline_id,
+ flow_ids,
+ n_flows,
+ 0xF,
+ 0xF,
+ 1,
+ flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow bulk");
+
+ free(flow_params);
+ free(flow_ids);
return;
- }
-
- /* Display stats */
- printf("\tPkts G: %" PRIu64
- "\tPkts Y: %" PRIu64
- "\tPkts R: %" PRIu64
- "\tPkts D: %" PRIu64 "\n",
- stats.n_pkts[e_RTE_METER_GREEN],
- stats.n_pkts[e_RTE_METER_YELLOW],
- stats.n_pkts[e_RTE_METER_RED],
- stats.n_pkts_drop);
-}
-
-cmdline_parse_token_string_t cmd_fa_policer_stats_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fa_policer_stats_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_stats_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
- flow_string, "flow");
-
-cmdline_parse_token_num_t cmd_fa_policer_stats_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
- flow_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_stats_policer_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
- policer_string, "policer");
-
-cmdline_parse_token_num_t cmd_fa_policer_stats_policer_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_policer_stats_result,
- policer_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fa_policer_stats_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_policer_stats_result,
- stats_string, "stats");
-
-cmdline_parse_inst_t cmd_fa_policer_stats = {
- .f = cmd_fa_policer_stats_parsed,
- .data = NULL,
- .help_str = "Flow policer stats read",
- .tokens = {
- (void *) &cmd_fa_policer_stats_p_string,
- (void *) &cmd_fa_policer_stats_pipeline_id,
- (void *) &cmd_fa_policer_stats_flow_string,
- (void *) &cmd_fa_policer_stats_flow_id,
- (void *) &cmd_fa_policer_stats_policer_string,
- (void *) &cmd_fa_policer_stats_policer_id,
- (void *) &cmd_fa_policer_stats_string,
- NULL,
- },
-};
-
-/*
- * Flow list
- *
- * p <pipeline ID> flow ls
- */
+ } /* action flow bulk */
+
+ /* action flow ls */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ (strcmp(tokens[1], "ls") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow ls");
+ return;
+ }
-struct cmd_fa_flow_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t actions_string;
- cmdline_fixed_string_t ls_string;
-};
+ status = app_pipeline_fa_flow_ls(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow ls");
-static void
-cmd_fa_flow_ls_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_flow_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_fa_flow_ls(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
-}
+ return;
+ } /* action flow ls */
+
+ /* action dscp */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "dscp") == 0) &&
+ strcmp(tokens[1], "ls")) {
+ uint32_t dscp_id, tc_id;
+ enum rte_meter_color color;
+
+ if (n_tokens != 6) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action dscp");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_flow_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
- p_string, "p");
+ if (parser_read_uint32(&dscp_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "dscpid");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fa_flow_ls_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_flow_ls_result,
- pipeline_id, UINT32);
+ if (strcmp(tokens[2], "class")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "class");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_flow_ls_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
- flow_string, "flow");
+ if (parser_read_uint32(&tc_id, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "classid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_flow_ls_actions_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
- actions_string, "actions");
+ if (strcmp(tokens[4], "color")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "color");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fa_flow_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_flow_ls_result,
- ls_string, "ls");
+ if (string_to_color(tokens[5], &color)) {
+ printf(CMD_MSG_INVALID_ARG, "colorid");
+ return;
+ }
-cmdline_parse_inst_t cmd_fa_flow_ls = {
- .f = cmd_fa_flow_ls_parsed,
- .data = NULL,
- .help_str = "Flow actions list",
- .tokens = {
- (void *) &cmd_fa_flow_ls_p_string,
- (void *) &cmd_fa_flow_ls_pipeline_id,
- (void *) &cmd_fa_flow_ls_flow_string,
- (void *) &cmd_fa_flow_ls_actions_string,
- (void *) &cmd_fa_flow_ls_ls_string,
- NULL,
- },
-};
+ status = app_pipeline_fa_dscp_config(app,
+ params->pipeline_id,
+ dscp_id,
+ tc_id,
+ color);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "action dscp");
-/*
- * Flow DiffServ Code Point (DSCP) translation table list
- *
- * p <pipeline ID> dscp ls
- */
+ return;
+ } /* action dscp */
+
+ /* action dscp ls */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "dscp") == 0) &&
+ (strcmp(tokens[1], "ls") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action dscp ls");
+ return;
+ }
-struct cmd_fa_dscp_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t dscp_string;
- cmdline_fixed_string_t ls_string;
-};
+ status = app_pipeline_fa_dscp_ls(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "action dscp ls");
-static void
-cmd_fa_dscp_ls_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fa_dscp_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ return;
+ } /* action dscp ls */
- status = app_pipeline_fa_dscp_ls(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
+ printf(CMD_MSG_MISMATCH_ARGS, "action");
}
-cmdline_parse_token_string_t cmd_fa_dscp_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result,
- p_string, "p");
+static cmdline_parse_token_string_t cmd_action_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, p_string, "p");
-cmdline_parse_token_num_t cmd_fa_dscp_ls_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fa_dscp_ls_result,
- pipeline_id, UINT32);
+static cmdline_parse_token_num_t cmd_action_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_action_result, pipeline_id, UINT32);
-cmdline_parse_token_string_t cmd_fa_dscp_ls_dscp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result,
- dscp_string, "dscp");
+static cmdline_parse_token_string_t cmd_action_action_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, action_string, "action");
-cmdline_parse_token_string_t cmd_fa_dscp_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fa_dscp_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_action_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_inst_t cmd_fa_dscp_ls = {
- .f = cmd_fa_dscp_ls_parsed,
+cmdline_parse_inst_t cmd_action = {
+ .f = cmd_action_parsed,
.data = NULL,
- .help_str = "Flow DSCP translaton table list",
+ .help_str = "flow actions (meter, policer, policer stats, dscp table)",
.tokens = {
- (void *) &cmd_fa_dscp_ls_p_string,
- (void *) &cmd_fa_dscp_ls_pipeline_id,
- (void *) &cmd_fa_dscp_ls_dscp_string,
- (void *) &cmd_fa_dscp_ls_string,
+ (void *) &cmd_action_p_string,
+ (void *) &cmd_action_pipeline_id,
+ (void *) &cmd_action_action_string,
+ (void *) &cmd_action_multi_string,
NULL,
},
};
static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_fa_meter_config,
- (cmdline_parse_inst_t *) &cmd_fa_meter_config_bulk,
- (cmdline_parse_inst_t *) &cmd_fa_policer_config,
- (cmdline_parse_inst_t *) &cmd_fa_policer_config_bulk,
- (cmdline_parse_inst_t *) &cmd_fa_output_port_config,
- (cmdline_parse_inst_t *) &cmd_fa_output_port_config_bulk,
- (cmdline_parse_inst_t *) &cmd_fa_dscp_config,
- (cmdline_parse_inst_t *) &cmd_fa_policer_stats,
- (cmdline_parse_inst_t *) &cmd_fa_flow_ls,
- (cmdline_parse_inst_t *) &cmd_fa_dscp_ls,
+ (cmdline_parse_inst_t *) &cmd_action,
NULL,
};
static struct pipeline_fe_ops pipeline_flow_actions_fe_ops = {
.f_init = app_pipeline_fa_init,
+ .f_post_init = NULL,
.f_free = app_pipeline_fa_free,
+ .f_track = app_pipeline_track_default,
.cmds = pipeline_cmds,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions.h b/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
index f2cd0cbb..9c609741 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
@@ -73,6 +73,17 @@ app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
int clear,
struct pipeline_fa_policer_stats *stats);
+#ifndef APP_PIPELINE_FA_MAX_RECORDS_IN_FILE
+#define APP_PIPELINE_FA_MAX_RECORDS_IN_FILE 65536
+#endif
+
+int
+app_pipeline_fa_load_file(char *filename,
+ uint32_t *flow_ids,
+ struct pipeline_fa_flow_params *p,
+ uint32_t *n_flows,
+ uint32_t *line);
+
extern struct pipeline_type pipeline_flow_actions;
#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
index 3ad3ee63..11fcbb76 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
@@ -760,27 +760,6 @@ pipeline_fa_free(void *pipeline)
}
static int
-pipeline_fa_track(void *pipeline,
- __rte_unused uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_in == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-static int
pipeline_fa_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
@@ -1007,5 +986,4 @@ struct pipeline_be_ops pipeline_flow_actions_be_ops = {
.f_free = pipeline_fa_free,
.f_run = NULL,
.f_timer = pipeline_fa_timer,
- .f_track = pipeline_fa_track,
};
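
The pipeline_firewall_track() removed earlier and the pipeline_fa_track() removed in this hunk were identical pass-through trackers, now superseded by the shared app_pipeline_track_default registered in the front-end ops. A self-contained sketch reconstructed from the deleted bodies (the stub pipeline type is hypothetical):

#include <stdint.h>
#include <stddef.h>

struct pipeline_stub {
	uint32_t n_ports_in;
};

/* Resolve the output port only when the pipeline has one input port. */
static int
track_default(struct pipeline_stub *p, uint32_t port_in, uint32_t *port_out)
{
	if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
		return -1;

	if (p->n_ports_in == 1) {
		*port_out = 0;
		return 0;
	}

	return -1;
}

int
main(void)
{
	struct pipeline_stub p = { .n_ports_in = 1 };
	uint32_t out;

	return track_default(&p, 0, &out); /* 0: port_in 0 maps to port 0 */
}
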
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
index 19215748..9ef50cc9 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <string.h>
#include <sys/queue.h>
#include <netinet/in.h>
+#include <unistd.h>
#include <rte_common.h>
#include <rte_hexdump.h>
@@ -43,13 +44,12 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
#include "app.h"
#include "pipeline_common_fe.h"
#include "pipeline_flow_classification.h"
#include "hash_func.h"
+#include "parser.h"
/*
* Key conversion
@@ -96,9 +96,9 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
struct pkt_key_qinq *qinq = key_buffer;
qinq->ethertype_svlan = 0;
- qinq->svlan = rte_bswap16(key_in->key.qinq.svlan);
+ qinq->svlan = rte_cpu_to_be_16(key_in->key.qinq.svlan);
qinq->ethertype_cvlan = 0;
- qinq->cvlan = rte_bswap16(key_in->key.qinq.cvlan);
+ qinq->cvlan = rte_cpu_to_be_16(key_in->key.qinq.cvlan);
if (signature)
*signature = (uint32_t) hash_default_key8(qinq, 8, 0);
@@ -112,10 +112,10 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
ipv4->ttl = 0;
ipv4->proto = key_in->key.ipv4_5tuple.proto;
ipv4->checksum = 0;
- ipv4->ip_src = rte_bswap32(key_in->key.ipv4_5tuple.ip_src);
- ipv4->ip_dst = rte_bswap32(key_in->key.ipv4_5tuple.ip_dst);
- ipv4->port_src = rte_bswap16(key_in->key.ipv4_5tuple.port_src);
- ipv4->port_dst = rte_bswap16(key_in->key.ipv4_5tuple.port_dst);
+ ipv4->ip_src = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_src);
+ ipv4->ip_dst = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_dst);
+ ipv4->port_src = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_src);
+ ipv4->port_dst = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_dst);
if (signature)
*signature = (uint32_t) hash_default_key16(ipv4, 16, 0);
@@ -132,8 +132,8 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
ipv6->hop_limit = 0;
memcpy(&ipv6->ip_src, &key_in->key.ipv6_5tuple.ip_src, 16);
memcpy(&ipv6->ip_dst, &key_in->key.ipv6_5tuple.ip_dst, 16);
- ipv6->port_src = rte_bswap16(key_in->key.ipv6_5tuple.port_src);
- ipv6->port_dst = rte_bswap16(key_in->key.ipv6_5tuple.port_dst);
+ ipv6->port_src = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_src);
+ ipv6->port_dst = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_dst);
if (signature)
*signature = (uint32_t) hash_default_key64(ipv6, 64, 0);
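
The rte_bswap16()/rte_bswap32() calls replaced in this hunk swap bytes unconditionally, which produces the wrong on-wire value when the host CPU is already big-endian; rte_cpu_to_be_16()/rte_cpu_to_be_32() compile to a swap only on little-endian hosts and to a no-op otherwise. A self-contained illustration using the standard htons() helper, which has the same semantics as rte_cpu_to_be_16():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int
main(void)
{
	uint16_t svlan = 0x0064; /* hypothetical S-VLAN tag in host order */

	/*
	 * htons() is a no-op on big-endian hosts and a byte swap on
	 * little-endian ones; an unconditional swap would corrupt the
	 * value whenever the host is already big-endian.
	 */
	printf("host 0x%04x -> network 0x%04x\n",
		(unsigned int)svlan, (unsigned int)htons(svlan));
	return 0;
}
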
@@ -278,6 +278,283 @@ app_pipeline_fc_key_check(struct pipeline_fc_key *key)
}
int
+app_pipeline_fc_load_file_qinq(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ uint16_t svlan, cvlan;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 7) ||
+ strcmp(tokens[0], "qinq") ||
+ parser_read_uint16(&svlan, tokens[1]) ||
+ parser_read_uint16(&cvlan, tokens[2]) ||
+ strcmp(tokens[3], "port") ||
+ parser_read_uint32(&portid, tokens[4]) ||
+ strcmp(tokens[5], "id") ||
+ parser_read_uint32(&flowid, tokens[6]))
+ goto error1;
+
+ keys[i].type = FLOW_KEY_QINQ;
+ keys[i].key.qinq.svlan = svlan;
+ keys[i].key.qinq.cvlan = cvlan;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error1;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error1:
+ *line = l;
+ fclose(f);
+ return -1;
+}
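
A record in the QinQ key file therefore carries exactly seven tokens. Assumed example contents (values hypothetical; '#' lines and blank lines are skipped by the tokenizer check above):

# qinq <svlan> <cvlan> port <portid> id <flowid>
qinq 64 128 port 3 id 0
qinq 64 129 port 2 id 1
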
+
+int
+app_pipeline_fc_load_file_ipv4(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ struct in_addr sipaddr, dipaddr;
+ uint16_t sport, dport;
+ uint8_t proto;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error2;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 10) ||
+ strcmp(tokens[0], "ipv4") ||
+ parse_ipv4_addr(tokens[1], &sipaddr) ||
+ parse_ipv4_addr(tokens[2], &dipaddr) ||
+ parser_read_uint16(&sport, tokens[3]) ||
+ parser_read_uint16(&dport, tokens[4]) ||
+ parser_read_uint8(&proto, tokens[5]) ||
+ strcmp(tokens[6], "port") ||
+ parser_read_uint32(&portid, tokens[7]) ||
+ strcmp(tokens[8], "id") ||
+ parser_read_uint32(&flowid, tokens[9]))
+ goto error2;
+
+ keys[i].type = FLOW_KEY_IPV4_5TUPLE;
+ keys[i].key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.port_src = sport;
+ keys[i].key.ipv4_5tuple.port_dst = dport;
+ keys[i].key.ipv4_5tuple.proto = proto;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error2;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error2:
+ *line = l;
+ fclose(f);
+ return -1;
+}
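
The ipv4 loader applies the same comment and blank-line handling but expects ten tokens per line. Illustrative file contents:

    # ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
    ipv4 10.0.0.1 192.168.0.1 1000 2000 6 port 0 id 0
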
+
+int
+app_pipeline_fc_load_file_ipv6(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ struct in6_addr sipaddr, dipaddr;
+ uint16_t sport, dport;
+ uint8_t proto;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error3;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 10) ||
+ strcmp(tokens[0], "ipv6") ||
+ parse_ipv6_addr(tokens[1], &sipaddr) ||
+ parse_ipv6_addr(tokens[2], &dipaddr) ||
+ parser_read_uint16(&sport, tokens[3]) ||
+ parser_read_uint16(&dport, tokens[4]) ||
+ parser_read_uint8(&proto, tokens[5]) ||
+ strcmp(tokens[6], "port") ||
+ parser_read_uint32(&portid, tokens[7]) ||
+ strcmp(tokens[8], "id") ||
+ parser_read_uint32(&flowid, tokens[9]))
+ goto error3;
+
+ keys[i].type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(keys[i].key.ipv6_5tuple.ip_src,
+ sipaddr.s6_addr,
+ sizeof(sipaddr.s6_addr));
+ memcpy(keys[i].key.ipv6_5tuple.ip_dst,
+ dipaddr.s6_addr,
+ sizeof(dipaddr.s6_addr));
+ keys[i].key.ipv6_5tuple.port_src = sport;
+ keys[i].key.ipv6_5tuple.port_dst = dport;
+ keys[i].key.ipv6_5tuple.proto = proto;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error3;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error3:
+ *line = l;
+ fclose(f);
+ return -1;
+}
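
The ipv6 loader follows the same ten-token shape, with addresses parsed by parse_ipv6_addr(). Illustrative:

    # ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
    ipv6 2001:db8::1 2001:db8::2 1000 2000 6 port 0 id 0
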
+
+
+
+int
app_pipeline_fc_add(struct app_params *app,
uint32_t pipeline_id,
struct pipeline_fc_key *key,
@@ -896,1315 +1173,728 @@ app_pipeline_fc_ls(struct app_params *app,
return 0;
}
-
/*
- * flow add qinq
- */
-
-struct cmd_fc_add_qinq_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t qinq_string;
- uint16_t svlan;
- uint16_t cvlan;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t flowid_string;
- uint32_t flow_id;
-};
-
-static void
-cmd_fc_add_qinq_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_qinq_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
-
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_QINQ;
- key.key.qinq.svlan = params->svlan;
- key.key.qinq.cvlan = params->cvlan;
-
- status = app_pipeline_fc_add(app,
- params->pipeline_id,
- &key,
- params->port,
- params->flow_id);
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, p_string, "p");
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flow_string,
- "flow");
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, add_string,
- "add");
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, qinq_string,
- "qinq");
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_svlan =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, svlan, UINT16);
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_cvlan =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, cvlan, UINT16);
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, port_string,
- "port");
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_port =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, port, UINT32);
-
-cmdline_parse_token_string_t cmd_fc_add_qinq_flowid_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_result, flowid_string,
- "flowid");
-
-cmdline_parse_token_num_t cmd_fc_add_qinq_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_result, flow_id, UINT32);
-
-cmdline_parse_inst_t cmd_fc_add_qinq = {
- .f = cmd_fc_add_qinq_parsed,
- .data = NULL,
- .help_str = "Flow add (Q-in-Q)",
- .tokens = {
- (void *) &cmd_fc_add_qinq_p_string,
- (void *) &cmd_fc_add_qinq_pipeline_id,
- (void *) &cmd_fc_add_qinq_flow_string,
- (void *) &cmd_fc_add_qinq_add_string,
- (void *) &cmd_fc_add_qinq_qinq_string,
- (void *) &cmd_fc_add_qinq_svlan,
- (void *) &cmd_fc_add_qinq_cvlan,
- (void *) &cmd_fc_add_qinq_port_string,
- (void *) &cmd_fc_add_qinq_port,
- (void *) &cmd_fc_add_qinq_flowid_string,
- (void *) &cmd_fc_add_qinq_flow_id,
- NULL,
- },
-};
-
-/*
- * flow add qinq all
+ * flow
+ *
+ * flow add:
+ * p <pipelineid> flow add qinq <svlan> <cvlan> port <portid> id <flowid>
+ * p <pipelineid> flow add qinq bulk <file>
+ * p <pipelineid> flow add ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
+ * p <pipelineid> flow add ipv4 bulk <file>
+ * p <pipelineid> flow add ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
+ * p <pipelineid> flow add ipv6 bulk <file>
+ *
+ * flow add default:
+ * p <pipelineid> flow add default <portid>
+ *
+ * flow del:
+ * p <pipelineid> flow del qinq <svlan> <cvlan>
+ * p <pipelineid> flow del ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto>
+ * p <pipelineid> flow del ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto>
+ *
+ * flow del default:
+ * p <pipelineid> flow del default
+ *
+ * flow ls:
+ * p <pipelineid> flow ls
*/
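
An illustrative interactive session against pipeline 1, exercising the grammar above (the bulk file path is hypothetical):

    p 1 flow add qinq 64 128 port 0 id 100
    p 1 flow add ipv4 10.0.0.1 192.168.0.1 1000 2000 6 port 1 id 101
    p 1 flow add ipv6 bulk /tmp/flows_ipv6.txt
    p 1 flow add default 2
    p 1 flow del qinq 64 128
    p 1 flow ls
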
-struct cmd_fc_add_qinq_all_result {
+struct cmd_flow_result {
cmdline_fixed_string_t p_string;
uint32_t pipeline_id;
cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t qinq_string;
- cmdline_fixed_string_t all_string;
- uint32_t n_flows;
- uint32_t n_ports;
+ cmdline_multi_string_t multi_string;
};
-#ifndef N_FLOWS_BULK
-#define N_FLOWS_BULK 4096
-#endif
-
static void
-cmd_fc_add_qinq_all_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
+cmd_flow_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
void *data)
{
- struct cmd_fc_add_qinq_all_result *params = parsed_result;
+ struct cmd_flow_result *results = parsed_result;
struct app_params *app = data;
- struct pipeline_fc_key *key;
- uint32_t *port_id;
- uint32_t *flow_id;
- uint32_t id;
-
- /* Check input arguments */
- if (params->n_flows == 0) {
- printf("Invalid number of flows\n");
- return;
- }
- if (params->n_ports == 0) {
- printf("Invalid number of output ports\n");
- return;
- }
-
- /* Memory allocation */
- key = rte_zmalloc(NULL,
- N_FLOWS_BULK * sizeof(*key),
- RTE_CACHE_LINE_SIZE);
- if (key == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
-
- port_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*port_id),
- RTE_CACHE_LINE_SIZE);
- if (port_id == NULL) {
- rte_free(key);
- printf("Memory allocation failed\n");
- return;
- }
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
- flow_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*flow_id),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- rte_free(port_id);
- rte_free(key);
- printf("Memory allocation failed\n");
+ status = parse_tokenize_string(results->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "flow");
return;
}
- /* Flow add */
- for (id = 0; id < params->n_flows; id++) {
- uint32_t pos = id & (N_FLOWS_BULK - 1);
-
- key[pos].type = FLOW_KEY_QINQ;
- key[pos].key.qinq.svlan = id >> 12;
- key[pos].key.qinq.cvlan = id & 0xFFF;
-
- port_id[pos] = id % params->n_ports;
- flow_id[pos] = id;
-
- if ((pos == N_FLOWS_BULK - 1) ||
- (id == params->n_flows - 1)) {
- int status;
-
- status = app_pipeline_fc_add_bulk(app,
- params->pipeline_id,
- key,
- port_id,
- flow_id,
- pos + 1);
-
- if (status != 0) {
- printf("Command failed\n");
-
- break;
- }
+ /* flow add qinq */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ uint32_t svlan;
+ uint32_t cvlan;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 8) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add qinq");
+ return;
}
- }
-
- /* Memory free */
- rte_free(flow_id);
- rte_free(port_id);
- rte_free(key);
-}
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, p_string,
- "p");
+ if (parser_read_uint32(&svlan, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_qinq_all_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, pipeline_id,
- UINT32);
+ if (parser_read_uint32(&cvlan, tokens[3]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, flow_string,
- "flow");
+ if (strcmp(tokens[4], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, add_string,
- "add");
+ if (parser_read_uint32(&port_id, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, qinq_string,
- "qinq");
+ if (strcmp(tokens[6], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_qinq_all_all_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_qinq_all_result, all_string,
- "all");
+ if (parser_read_uint32(&flow_id, tokens[7]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_qinq_all_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, n_flows,
- UINT32);
+ key.type = FLOW_KEY_QINQ;
+ key.key.qinq.svlan = svlan;
+ key.key.qinq.cvlan = cvlan;
-cmdline_parse_token_num_t cmd_fc_add_qinq_all_n_ports =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_qinq_all_result, n_ports,
- UINT32);
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add qinq");
-cmdline_parse_inst_t cmd_fc_add_qinq_all = {
- .f = cmd_fc_add_qinq_all_parsed,
- .data = NULL,
- .help_str = "Flow add all (Q-in-Q)",
- .tokens = {
- (void *) &cmd_fc_add_qinq_all_p_string,
- (void *) &cmd_fc_add_qinq_all_pipeline_id,
- (void *) &cmd_fc_add_qinq_all_flow_string,
- (void *) &cmd_fc_add_qinq_all_add_string,
- (void *) &cmd_fc_add_qinq_all_qinq_string,
- (void *) &cmd_fc_add_qinq_all_all_string,
- (void *) &cmd_fc_add_qinq_all_n_flows,
- (void *) &cmd_fc_add_qinq_all_n_ports,
- NULL,
- },
-};
+ return;
+ } /* flow add qinq */
+
+ /* flow add ipv4 */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ struct in_addr sipaddr;
+ struct in_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 11) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv4");
+ return;
+ }
-/*
- * flow add ipv4_5tuple
- */
+ if (parse_ipv4_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv4addr");
+ return;
+ }
+ if (parse_ipv4_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv4addr");
+ return;
+ }
-struct cmd_fc_add_ipv4_5tuple_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv4_5tuple_string;
- cmdline_ipaddr_t ip_src;
- cmdline_ipaddr_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t flowid_string;
- uint32_t flow_id;
-};
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
-static void
-cmd_fc_add_ipv4_5tuple_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_ipv4_5tuple_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_IPV4_5TUPLE;
- key.key.ipv4_5tuple.ip_src = rte_bswap32(
- params->ip_src.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.ip_dst = rte_bswap32(
- params->ip_dst.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.port_src = params->port_src;
- key.key.ipv4_5tuple.port_dst = params->port_dst;
- key.key.ipv4_5tuple.proto = params->proto;
-
- status = app_pipeline_fc_add(app,
- params->pipeline_id,
- &key,
- params->port,
- params->flow_id);
- if (status != 0)
- printf("Command failed\n");
-}
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, p_string,
- "p");
+ if (strcmp(tokens[7], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, pipeline_id,
- UINT32);
+ if (parser_read_uint32(&port_id, tokens[8]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
- flow_string, "flow");
+ if (strcmp(tokens[9], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
- add_string, "add");
+ if (parser_read_uint32(&flow_id, tokens[10]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_ipv4_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
- ipv4_5tuple_string, "ipv4_5tuple");
+ key.type = FLOW_KEY_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.port_src = sport;
+ key.key.ipv4_5tuple.port_dst = dport;
+ key.key.ipv4_5tuple.proto = proto;
+
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv4");
-cmdline_parse_token_ipaddr_t cmd_fc_add_ipv4_5tuple_ip_src =
- TOKEN_IPV4_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, ip_src);
+ return;
+ } /* flow add ipv4 */
+
+ /* flow add ipv6 */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ struct in6_addr sipaddr;
+ struct in6_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 11) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv6");
+ return;
+ }
-cmdline_parse_token_ipaddr_t cmd_fc_add_ipv4_5tuple_ip_dst =
- TOKEN_IPV4_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, ip_dst);
+ if (parse_ipv6_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv6addr");
+ return;
+ }
+ if (parse_ipv6_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv6addr");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port_src =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_src,
- UINT16);
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port_dst =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_dst,
- UINT16);
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, proto,
- UINT32);
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port_string,
- "port");
+ if (strcmp(tokens[7], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_port =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, port,
- UINT32);
+ if (parser_read_uint32(&port_id, tokens[8]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_flowid_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result,
- flowid_string, "flowid");
+ if (strcmp(tokens[9], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_result, flow_id,
- UINT32);
+ if (parser_read_uint32(&flow_id, tokens[10]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
-cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple = {
- .f = cmd_fc_add_ipv4_5tuple_parsed,
- .data = NULL,
- .help_str = "Flow add (IPv4 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_add_ipv4_5tuple_p_string,
- (void *) &cmd_fc_add_ipv4_5tuple_pipeline_id,
- (void *) &cmd_fc_add_ipv4_5tuple_flow_string,
- (void *) &cmd_fc_add_ipv4_5tuple_add_string,
- (void *) &cmd_fc_add_ipv4_5tuple_ipv4_5tuple_string,
- (void *) &cmd_fc_add_ipv4_5tuple_ip_src,
- (void *) &cmd_fc_add_ipv4_5tuple_ip_dst,
- (void *) &cmd_fc_add_ipv4_5tuple_port_src,
- (void *) &cmd_fc_add_ipv4_5tuple_port_dst,
- (void *) &cmd_fc_add_ipv4_5tuple_proto,
- (void *) &cmd_fc_add_ipv4_5tuple_port_string,
- (void *) &cmd_fc_add_ipv4_5tuple_port,
- (void *) &cmd_fc_add_ipv4_5tuple_flowid_string,
- (void *) &cmd_fc_add_ipv4_5tuple_flow_id,
- NULL,
- },
-};
+ key.type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(key.key.ipv6_5tuple.ip_src, (void *)&sipaddr, 16);
+ memcpy(key.key.ipv6_5tuple.ip_dst, (void *)&dipaddr, 16);
+ key.key.ipv6_5tuple.port_src = sport;
+ key.key.ipv6_5tuple.port_dst = dport;
+ key.key.ipv6_5tuple.proto = proto;
+
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv6");
-/*
- * flow add ipv4_5tuple all
- */
+ return;
+ } /* flow add ipv6 */
+
+ /* flow add qinq bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add qinq bulk");
+ return;
+ }
-struct cmd_fc_add_ipv4_5tuple_all_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv4_5tuple_string;
- cmdline_fixed_string_t all_string;
- uint32_t n_flows;
- uint32_t n_ports;
-};
+ filename = tokens[3];
-static void
-cmd_fc_add_ipv4_5tuple_all_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_ipv4_5tuple_all_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key *key;
- uint32_t *port_id;
- uint32_t *flow_id;
- uint32_t id;
-
- /* Check input parameters */
- if (params->n_flows == 0) {
- printf("Invalid number of flows\n");
- return;
- }
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
- if (params->n_ports == 0) {
- printf("Invalid number of ports\n");
- return;
- }
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
- /* Memory allocation */
- key = rte_zmalloc(NULL,
- N_FLOWS_BULK * sizeof(*key),
- RTE_CACHE_LINE_SIZE);
- if (key == NULL) {
- printf("Memory allocation failed\n");
- return;
- }
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
- port_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*port_id),
- RTE_CACHE_LINE_SIZE);
- if (port_id == NULL) {
- rte_free(key);
- printf("Memory allocation failed\n");
- return;
- }
+ status = app_pipeline_fc_load_file_qinq(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
- flow_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*flow_id),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- rte_free(port_id);
- rte_free(key);
- printf("Memory allocation failed\n");
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add qinq bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
return;
- }
-
- /* Flow add */
- for (id = 0; id < params->n_flows; id++) {
- uint32_t pos = id & (N_FLOWS_BULK - 1);
+ } /* flow add qinq bulk */
+
+ /* flow add ipv4 bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv4 bulk");
+ return;
+ }
- key[pos].type = FLOW_KEY_IPV4_5TUPLE;
- key[pos].key.ipv4_5tuple.ip_src = 0;
- key[pos].key.ipv4_5tuple.ip_dst = id;
- key[pos].key.ipv4_5tuple.port_src = 0;
- key[pos].key.ipv4_5tuple.port_dst = 0;
- key[pos].key.ipv4_5tuple.proto = 6;
+ filename = tokens[3];
- port_id[pos] = id % params->n_ports;
- flow_id[pos] = id;
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
- if ((pos == N_FLOWS_BULK - 1) ||
- (id == params->n_flows - 1)) {
- int status;
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
- status = app_pipeline_fc_add_bulk(app,
- params->pipeline_id,
- key,
- port_id,
- flow_id,
- pos + 1);
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
- if (status != 0) {
- printf("Command failed\n");
+ status = app_pipeline_fc_load_file_ipv4(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
- break;
- }
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv4 bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ } /* flow add ipv4 bulk */
+
+ /* flow add ipv6 bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv6 bulk");
+ return;
}
- }
- /* Memory free */
- rte_free(flow_id);
- rte_free(port_id);
- rte_free(key);
-}
+ filename = tokens[3];
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- p_string, "p");
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- pipeline_id, UINT32);
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- flow_string, "flow");
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- add_string, "add");
+ status = app_pipeline_fc_load_file_ipv6(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_ipv4_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- ipv4_5tuple_string, "ipv4_5tuple");
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv6 bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ } /* flow add ipv6 bulk */
-cmdline_parse_token_string_t cmd_fc_add_ipv4_5tuple_all_all_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- all_string, "all");
+	/* flow add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- n_flows, UINT32);
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add default");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv4_5tuple_all_n_ports =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv4_5tuple_all_result,
- n_ports, UINT32);
+ if (parser_read_uint32(&port_id, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-cmdline_parse_inst_t cmd_fc_add_ipv4_5tuple_all = {
- .f = cmd_fc_add_ipv4_5tuple_all_parsed,
- .data = NULL,
- .help_str = "Flow add all (IPv4 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_add_ipv4_5tuple_all_p_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_pipeline_id,
- (void *) &cmd_fc_add_ipv4_5tuple_all_flow_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_add_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_ipv4_5tuple_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_all_string,
- (void *) &cmd_fc_add_ipv4_5tuple_all_n_flows,
- (void *) &cmd_fc_add_ipv4_5tuple_all_n_ports,
- NULL,
- },
-};
+ status = app_pipeline_fc_add_default(app,
+ results->pipeline_id,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add default");
-/*
- * flow add ipv6_5tuple
- */
+ return;
+ } /* flow add default */
-struct cmd_fc_add_ipv6_5tuple_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv6_5tuple_string;
- cmdline_ipaddr_t ip_src;
- cmdline_ipaddr_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t flowid_string;
- uint32_t flow_id;
-};
+ /* flow del qinq */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0)) {
+ struct pipeline_fc_key key;
+ uint32_t svlan;
+ uint32_t cvlan;
-static void
-cmd_fc_add_ipv6_5tuple_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_ipv6_5tuple_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
+ memset(&key, 0, sizeof(key));
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_IPV6_5TUPLE;
- memcpy(key.key.ipv6_5tuple.ip_src,
- params->ip_src.addr.ipv6.s6_addr,
- 16);
- memcpy(key.key.ipv6_5tuple.ip_dst,
- params->ip_dst.addr.ipv6.s6_addr,
- 16);
- key.key.ipv6_5tuple.port_src = params->port_src;
- key.key.ipv6_5tuple.port_dst = params->port_dst;
- key.key.ipv6_5tuple.proto = params->proto;
-
- status = app_pipeline_fc_add(app,
- params->pipeline_id,
- &key,
- params->port,
- params->flow_id);
- if (status != 0)
- printf("Command failed\n");
-}
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del qinq");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- p_string, "p");
+ if (parser_read_uint32(&svlan, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, pipeline_id,
- UINT32);
+ if (parser_read_uint32(&cvlan, tokens[3]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- flow_string, "flow");
+ key.type = FLOW_KEY_QINQ;
+ key.key.qinq.svlan = svlan;
+ key.key.qinq.cvlan = cvlan;
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- add_string, "add");
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del qinq");
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_ipv6_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- ipv6_5tuple_string, "ipv6_5tuple");
+ return;
+ } /* flow del qinq */
+
+ /* flow del ipv4 */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0)) {
+ struct pipeline_fc_key key;
+ struct in_addr sipaddr;
+ struct in_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 7) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del ipv4");
+ return;
+ }
-cmdline_parse_token_ipaddr_t cmd_fc_add_ipv6_5tuple_ip_src =
- TOKEN_IPV6_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, ip_src);
+ if (parse_ipv4_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv4addr");
+ return;
+ }
+ if (parse_ipv4_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv4addr");
+ return;
+ }
-cmdline_parse_token_ipaddr_t cmd_fc_add_ipv6_5tuple_ip_dst =
- TOKEN_IPV6_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, ip_dst);
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port_src =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port_src,
- UINT16);
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port_dst =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port_dst,
- UINT16);
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, proto,
- UINT32);
+ key.type = FLOW_KEY_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.port_src = sport;
+ key.key.ipv4_5tuple.port_dst = dport;
+ key.key.ipv4_5tuple.proto = proto;
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- port_string, "port");
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del ipv4");
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_port =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, port,
- UINT32);
+ return;
+ } /* flow del ipv4 */
+
+ /* flow del ipv6 */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0)) {
+ struct pipeline_fc_key key;
+ struct in6_addr sipaddr;
+ struct in6_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 7) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del ipv6");
+ return;
+ }
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_flowid_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result,
- flowid_string, "flowid");
+ if (parse_ipv6_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv6addr");
+ return;
+ }
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_flow_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_result, flow_id,
- UINT32);
+ if (parse_ipv6_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv6addr");
+ return;
+ }
-cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple = {
- .f = cmd_fc_add_ipv6_5tuple_parsed,
- .data = NULL,
- .help_str = "Flow add (IPv6 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_add_ipv6_5tuple_p_string,
- (void *) &cmd_fc_add_ipv6_5tuple_pipeline_id,
- (void *) &cmd_fc_add_ipv6_5tuple_flow_string,
- (void *) &cmd_fc_add_ipv6_5tuple_add_string,
- (void *) &cmd_fc_add_ipv6_5tuple_ipv6_5tuple_string,
- (void *) &cmd_fc_add_ipv6_5tuple_ip_src,
- (void *) &cmd_fc_add_ipv6_5tuple_ip_dst,
- (void *) &cmd_fc_add_ipv6_5tuple_port_src,
- (void *) &cmd_fc_add_ipv6_5tuple_port_dst,
- (void *) &cmd_fc_add_ipv6_5tuple_proto,
- (void *) &cmd_fc_add_ipv6_5tuple_port_string,
- (void *) &cmd_fc_add_ipv6_5tuple_port,
- (void *) &cmd_fc_add_ipv6_5tuple_flowid_string,
- (void *) &cmd_fc_add_ipv6_5tuple_flow_id,
- NULL,
- },
-};
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
-/*
- * flow add ipv6_5tuple all
- */
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
-struct cmd_fc_add_ipv6_5tuple_all_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t ipv6_5tuple_string;
- cmdline_fixed_string_t all_string;
- uint32_t n_flows;
- uint32_t n_ports;
-};
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
-static void
-cmd_fc_add_ipv6_5tuple_all_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_ipv6_5tuple_all_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key *key;
- uint32_t *port_id;
- uint32_t *flow_id;
- uint32_t id;
-
- /* Check input parameters */
- if (params->n_flows == 0) {
- printf("Invalid number of flows\n");
- return;
- }
+ key.type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(key.key.ipv6_5tuple.ip_src, &sipaddr, 16);
+ memcpy(key.key.ipv6_5tuple.ip_dst, &dipaddr, 16);
+ key.key.ipv6_5tuple.port_src = sport;
+ key.key.ipv6_5tuple.port_dst = dport;
+ key.key.ipv6_5tuple.proto = proto;
- if (params->n_ports == 0) {
- printf("Invalid number of ports\n");
- return;
- }
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del ipv6");
- /* Memory allocation */
- key = rte_zmalloc(NULL,
- N_FLOWS_BULK * sizeof(*key),
- RTE_CACHE_LINE_SIZE);
- if (key == NULL) {
- printf("Memory allocation failed\n");
return;
- }
+ } /* flow del ipv6 */
+
+	/* flow del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del default");
+ return;
+ }
- port_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*port_id),
- RTE_CACHE_LINE_SIZE);
- if (port_id == NULL) {
- rte_free(key);
- printf("Memory allocation failed\n");
- return;
- }
+ status = app_pipeline_fc_del_default(app,
+ results->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del default");
- flow_id = rte_malloc(NULL,
- N_FLOWS_BULK * sizeof(*flow_id),
- RTE_CACHE_LINE_SIZE);
- if (flow_id == NULL) {
- rte_free(port_id);
- rte_free(key);
- printf("Memory allocation failed\n");
return;
- }
-
- /* Flow add */
- for (id = 0; id < params->n_flows; id++) {
- uint32_t pos = id & (N_FLOWS_BULK - 1);
- uint32_t *x;
-
- key[pos].type = FLOW_KEY_IPV6_5TUPLE;
- x = (uint32_t *) key[pos].key.ipv6_5tuple.ip_dst;
- *x = rte_bswap32(id);
- key[pos].key.ipv6_5tuple.proto = 6;
-
- port_id[pos] = id % params->n_ports;
- flow_id[pos] = id;
+ } /* flow del default */
- if ((pos == N_FLOWS_BULK - 1) ||
- (id == params->n_flows - 1)) {
- int status;
-
- status = app_pipeline_fc_add_bulk(app,
- params->pipeline_id,
- key,
- port_id,
- flow_id,
- pos + 1);
-
- if (status != 0) {
- printf("Command failed\n");
-
- break;
- }
+ /* flow ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow ls");
+ return;
}
- }
-
- /* Memory free */
- rte_free(flow_id);
- rte_free(port_id);
- rte_free(key);
-}
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- flow_string, "flow");
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- add_string, "add");
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_ipv6_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- ipv6_5tuple_string, "ipv6_5tuple");
-
-cmdline_parse_token_string_t cmd_fc_add_ipv6_5tuple_all_all_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- all_string, "all");
-
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_n_flows =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- n_flows, UINT32);
-
-cmdline_parse_token_num_t cmd_fc_add_ipv6_5tuple_all_n_ports =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_ipv6_5tuple_all_result,
- n_ports, UINT32);
-
-cmdline_parse_inst_t cmd_fc_add_ipv6_5tuple_all = {
- .f = cmd_fc_add_ipv6_5tuple_all_parsed,
- .data = NULL,
- .help_str = "Flow add all (ipv6 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_add_ipv6_5tuple_all_p_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_pipeline_id,
- (void *) &cmd_fc_add_ipv6_5tuple_all_flow_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_add_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_ipv6_5tuple_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_all_string,
- (void *) &cmd_fc_add_ipv6_5tuple_all_n_flows,
- (void *) &cmd_fc_add_ipv6_5tuple_all_n_ports,
- NULL,
- },
-};
-
-/*
- * flow del qinq
- */
-struct cmd_fc_del_qinq_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t qinq_string;
- uint16_t svlan;
- uint16_t cvlan;
-};
-
-static void
-cmd_fc_del_qinq_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_del_qinq_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
-
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_QINQ;
- key.key.qinq.svlan = params->svlan;
- key.key.qinq.cvlan = params->cvlan;
- status = app_pipeline_fc_del(app, params->pipeline_id, &key);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_del_qinq_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, p_string, "p");
-
-cmdline_parse_token_num_t cmd_fc_del_qinq_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_fc_del_qinq_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, flow_string,
- "flow");
-
-cmdline_parse_token_string_t cmd_fc_del_qinq_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, del_string,
- "del");
-
-cmdline_parse_token_string_t cmd_fc_del_qinq_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_qinq_result, qinq_string,
- "qinq");
-
-cmdline_parse_token_num_t cmd_fc_del_qinq_svlan =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, svlan, UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_qinq_cvlan =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_qinq_result, cvlan, UINT16);
-
-cmdline_parse_inst_t cmd_fc_del_qinq = {
- .f = cmd_fc_del_qinq_parsed,
- .data = NULL,
- .help_str = "Flow delete (Q-in-Q)",
- .tokens = {
- (void *) &cmd_fc_del_qinq_p_string,
- (void *) &cmd_fc_del_qinq_pipeline_id,
- (void *) &cmd_fc_del_qinq_flow_string,
- (void *) &cmd_fc_del_qinq_del_string,
- (void *) &cmd_fc_del_qinq_qinq_string,
- (void *) &cmd_fc_del_qinq_svlan,
- (void *) &cmd_fc_del_qinq_cvlan,
- NULL,
- },
-};
-
-/*
- * flow del ipv4_5tuple
- */
-
-struct cmd_fc_del_ipv4_5tuple_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t ipv4_5tuple_string;
- cmdline_ipaddr_t ip_src;
- cmdline_ipaddr_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
-};
-
-static void
-cmd_fc_del_ipv4_5tuple_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_del_ipv4_5tuple_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
-
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_IPV4_5TUPLE;
- key.key.ipv4_5tuple.ip_src = rte_bswap32(
- params->ip_src.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.ip_dst = rte_bswap32(
- params->ip_dst.addr.ipv4.s_addr);
- key.key.ipv4_5tuple.port_src = params->port_src;
- key.key.ipv4_5tuple.port_dst = params->port_dst;
- key.key.ipv4_5tuple.proto = params->proto;
-
- status = app_pipeline_fc_del(app, params->pipeline_id, &key);
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- p_string, "p");
-cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- pipeline_id, UINT32);
+ status = app_pipeline_fc_ls(app, results->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow ls");
-cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- flow_string, "flow");
-
-cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- del_string, "del");
-
-cmdline_parse_token_string_t cmd_fc_del_ipv4_5tuple_ipv4_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- ipv4_5tuple_string, "ipv4_5tuple");
-
-cmdline_parse_token_ipaddr_t cmd_fc_del_ipv4_5tuple_ip_src =
- TOKEN_IPV4_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- ip_src);
-
-cmdline_parse_token_ipaddr_t cmd_fc_del_ipv4_5tuple_ip_dst =
- TOKEN_IPV4_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result, ip_dst);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_port_src =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- port_src, UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_port_dst =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- port_dst, UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv4_5tuple_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv4_5tuple_result,
- proto, UINT32);
-
-cmdline_parse_inst_t cmd_fc_del_ipv4_5tuple = {
- .f = cmd_fc_del_ipv4_5tuple_parsed,
- .data = NULL,
- .help_str = "Flow delete (IPv4 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_del_ipv4_5tuple_p_string,
- (void *) &cmd_fc_del_ipv4_5tuple_pipeline_id,
- (void *) &cmd_fc_del_ipv4_5tuple_flow_string,
- (void *) &cmd_fc_del_ipv4_5tuple_del_string,
- (void *) &cmd_fc_del_ipv4_5tuple_ipv4_5tuple_string,
- (void *) &cmd_fc_del_ipv4_5tuple_ip_src,
- (void *) &cmd_fc_del_ipv4_5tuple_ip_dst,
- (void *) &cmd_fc_del_ipv4_5tuple_port_src,
- (void *) &cmd_fc_del_ipv4_5tuple_port_dst,
- (void *) &cmd_fc_del_ipv4_5tuple_proto,
- NULL,
- },
-};
-
-/*
- * flow del ipv6_5tuple
- */
-
-struct cmd_fc_del_ipv6_5tuple_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t ipv6_5tuple_string;
- cmdline_ipaddr_t ip_src;
- cmdline_ipaddr_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
-};
-
-static void
-cmd_fc_del_ipv6_5tuple_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_del_ipv6_5tuple_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_fc_key key;
- int status;
-
- memset(&key, 0, sizeof(key));
- key.type = FLOW_KEY_IPV6_5TUPLE;
- memcpy(key.key.ipv6_5tuple.ip_src,
- params->ip_src.addr.ipv6.s6_addr,
- 16);
- memcpy(key.key.ipv6_5tuple.ip_dst,
- params->ip_dst.addr.ipv6.s6_addr,
- 16);
- key.key.ipv6_5tuple.port_src = params->port_src;
- key.key.ipv6_5tuple.port_dst = params->port_dst;
- key.key.ipv6_5tuple.proto = params->proto;
-
- status = app_pipeline_fc_del(app, params->pipeline_id, &key);
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- p_string, "p");
-
-cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- pipeline_id, UINT32);
-
-cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- flow_string, "flow");
-
-cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- del_string, "del");
-
-cmdline_parse_token_string_t cmd_fc_del_ipv6_5tuple_ipv6_5tuple_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result,
- ipv6_5tuple_string, "ipv6_5tuple");
-
-cmdline_parse_token_ipaddr_t cmd_fc_del_ipv6_5tuple_ip_src =
- TOKEN_IPV6_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, ip_src);
-
-cmdline_parse_token_ipaddr_t cmd_fc_del_ipv6_5tuple_ip_dst =
- TOKEN_IPV6_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, ip_dst);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_port_src =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, port_src,
- UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_port_dst =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, port_dst,
- UINT16);
-
-cmdline_parse_token_num_t cmd_fc_del_ipv6_5tuple_proto =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_ipv6_5tuple_result, proto,
- UINT32);
-
-cmdline_parse_inst_t cmd_fc_del_ipv6_5tuple = {
- .f = cmd_fc_del_ipv6_5tuple_parsed,
- .data = NULL,
- .help_str = "Flow delete (IPv6 5-tuple)",
- .tokens = {
- (void *) &cmd_fc_del_ipv6_5tuple_p_string,
- (void *) &cmd_fc_del_ipv6_5tuple_pipeline_id,
- (void *) &cmd_fc_del_ipv6_5tuple_flow_string,
- (void *) &cmd_fc_del_ipv6_5tuple_del_string,
- (void *) &cmd_fc_del_ipv6_5tuple_ipv6_5tuple_string,
- (void *) &cmd_fc_del_ipv6_5tuple_ip_src,
- (void *) &cmd_fc_del_ipv6_5tuple_ip_dst,
- (void *) &cmd_fc_del_ipv6_5tuple_port_src,
- (void *) &cmd_fc_del_ipv6_5tuple_port_dst,
- (void *) &cmd_fc_del_ipv6_5tuple_proto,
- NULL,
- },
-};
-
-/*
- * flow add default
- */
-
-struct cmd_fc_add_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t default_string;
- uint32_t port;
-};
-
-static void
-cmd_fc_add_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_add_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_fc_add_default(app, params->pipeline_id,
- params->port);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_add_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, p_string,
- "p");
-
-cmdline_parse_token_num_t cmd_fc_add_default_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_default_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_fc_add_default_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, flow_string,
- "flow");
-
-cmdline_parse_token_string_t cmd_fc_add_default_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result, add_string,
- "add");
-
-cmdline_parse_token_string_t cmd_fc_add_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_add_default_result,
- default_string, "default");
-
-cmdline_parse_token_num_t cmd_fc_add_default_port =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_add_default_result, port, UINT32);
-
-cmdline_parse_inst_t cmd_fc_add_default = {
- .f = cmd_fc_add_default_parsed,
- .data = NULL,
- .help_str = "Flow add default",
- .tokens = {
- (void *) &cmd_fc_add_default_p_string,
- (void *) &cmd_fc_add_default_pipeline_id,
- (void *) &cmd_fc_add_default_flow_string,
- (void *) &cmd_fc_add_default_add_string,
- (void *) &cmd_fc_add_default_default_string,
- (void *) &cmd_fc_add_default_port,
- NULL,
- },
-};
-
-/*
- * flow del default
- */
-
-struct cmd_fc_del_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t default_string;
-};
-
-static void
-cmd_fc_del_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_del_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_fc_del_default(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
-}
-
-cmdline_parse_token_string_t cmd_fc_del_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, p_string,
- "p");
-
-cmdline_parse_token_num_t cmd_fc_del_default_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_del_default_result, pipeline_id,
- UINT32);
-
-cmdline_parse_token_string_t cmd_fc_del_default_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, flow_string,
- "flow");
-
-cmdline_parse_token_string_t cmd_fc_del_default_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result, del_string,
- "del");
-
-cmdline_parse_token_string_t cmd_fc_del_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_del_default_result,
- default_string, "default");
-
-cmdline_parse_inst_t cmd_fc_del_default = {
- .f = cmd_fc_del_default_parsed,
- .data = NULL,
- .help_str = "Flow delete default",
- .tokens = {
- (void *) &cmd_fc_del_default_p_string,
- (void *) &cmd_fc_del_default_pipeline_id,
- (void *) &cmd_fc_del_default_flow_string,
- (void *) &cmd_fc_del_default_del_string,
- (void *) &cmd_fc_del_default_default_string,
- NULL,
- },
-};
-
-/*
- * flow ls
- */
-
-struct cmd_fc_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_fixed_string_t ls_string;
-};
-
-static void
-cmd_fc_ls_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_fc_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ return;
+ } /* flow ls */
- status = app_pipeline_fc_ls(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
+ printf(CMD_MSG_MISMATCH_ARGS, "flow");
}
-cmdline_parse_token_string_t cmd_fc_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result, p_string, "p");
+static cmdline_parse_token_string_t cmd_flow_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, p_string, "p");
-cmdline_parse_token_num_t cmd_fc_ls_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_fc_ls_result, pipeline_id, UINT32);
+static cmdline_parse_token_num_t cmd_flow_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_flow_result, pipeline_id, UINT32);
-cmdline_parse_token_string_t cmd_fc_ls_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result,
- flow_string, "flow");
+static cmdline_parse_token_string_t cmd_flow_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, flow_string, "flow");
-cmdline_parse_token_string_t cmd_fc_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_fc_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_flow_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, multi_string,
+ TOKEN_STRING_MULTI);
-cmdline_parse_inst_t cmd_fc_ls = {
- .f = cmd_fc_ls_parsed,
+static cmdline_parse_inst_t cmd_flow = {
+ .f = cmd_flow_parsed,
.data = NULL,
- .help_str = "Flow list",
+ .help_str = "flow add / add bulk / add default / del / del default / ls",
.tokens = {
- (void *) &cmd_fc_ls_p_string,
- (void *) &cmd_fc_ls_pipeline_id,
- (void *) &cmd_fc_ls_flow_string,
- (void *) &cmd_fc_ls_ls_string,
+ (void *) &cmd_flow_p_string,
+ (void *) &cmd_flow_pipeline_id,
+ (void *) &cmd_flow_flow_string,
+ (void *) &cmd_flow_multi_string,
NULL,
},
};
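
The consolidation works because the final token of cmd_flow is declared with TOKEN_STRING_MULTI, so librte_cmdline captures everything after the fixed "p <pipelineid> flow" prefix into one string; cmd_flow_parsed() then re-splits it with parse_tokenize_string() and dispatches on the leading words. A stripped-down sketch of the pattern (the type and variable names here are illustrative, not taken from the diff):

    /* The last token swallows the remainder of the command line. */
    struct cmd_example_result {
    	cmdline_fixed_string_t keyword;      /* fixed token, e.g. "flow" */
    	cmdline_multi_string_t multi_string; /* rest of the line */
    };

    static cmdline_parse_token_string_t cmd_example_multi =
    	TOKEN_STRING_INITIALIZER(struct cmd_example_result, multi_string,
    	TOKEN_STRING_MULTI);

The handler then tokenizes multi_string itself, trading the per-command parse trees of the old code for one catch-all entry plus manual validation.
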
static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_fc_add_qinq,
- (cmdline_parse_inst_t *) &cmd_fc_add_ipv4_5tuple,
- (cmdline_parse_inst_t *) &cmd_fc_add_ipv6_5tuple,
-
- (cmdline_parse_inst_t *) &cmd_fc_del_qinq,
- (cmdline_parse_inst_t *) &cmd_fc_del_ipv4_5tuple,
- (cmdline_parse_inst_t *) &cmd_fc_del_ipv6_5tuple,
-
- (cmdline_parse_inst_t *) &cmd_fc_add_default,
- (cmdline_parse_inst_t *) &cmd_fc_del_default,
-
- (cmdline_parse_inst_t *) &cmd_fc_add_qinq_all,
- (cmdline_parse_inst_t *) &cmd_fc_add_ipv4_5tuple_all,
- (cmdline_parse_inst_t *) &cmd_fc_add_ipv6_5tuple_all,
-
- (cmdline_parse_inst_t *) &cmd_fc_ls,
+ (cmdline_parse_inst_t *) &cmd_flow,
NULL,
};
static struct pipeline_fe_ops pipeline_flow_classification_fe_ops = {
.f_init = app_pipeline_fc_init,
+ .f_post_init = NULL,
.f_free = app_pipeline_fc_free,
+ .f_track = app_pipeline_track_default,
.cmds = pipeline_cmds,
};
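
The rework above replaces one cmdline_parse_inst_t per subcommand with a single cmd_flow instance: the trailing TOKEN_STRING_MULTI token captures the rest of the command line, and cmd_flow_parsed() tokenizes it and branches on the leading keywords. A minimal sketch of that dispatch idiom, built only from helpers visible in this patch (the "ls" branch is the sole subcommand shown; the others follow the same shape):

static void
cmd_example_parsed(void *parsed_result, __rte_unused struct cmdline *cl,
	void *data)
{
	struct cmd_flow_result *params = parsed_result;
	struct app_params *app = data;
	char *tokens[16];
	uint32_t n_tokens = RTE_DIM(tokens);

	/* Split the TOKEN_STRING_MULTI tail into individual tokens */
	if (parse_tokenize_string(params->multi_string, tokens, &n_tokens)) {
		printf(CMD_MSG_TOO_MANY_ARGS, "flow");
		return;
	}

	/* Dispatch on the leading keyword */
	if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
		if (app_pipeline_fc_ls(app, params->pipeline_id) != 0)
			printf(CMD_MSG_FAIL, "flow ls");
		return;
	}

	printf(CMD_MSG_MISMATCH_ARGS, "flow");
}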
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
index 9c775006..6c5ed384 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
@@ -102,6 +102,34 @@ int
app_pipeline_fc_del_default(struct app_params *app,
uint32_t pipeline_id);
+#ifndef APP_PIPELINE_FC_MAX_FLOWS_IN_FILE
+#define APP_PIPELINE_FC_MAX_FLOWS_IN_FILE (16 * 1024 * 1024)
+#endif
+
+int
+app_pipeline_fc_load_file_qinq(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
+int
+app_pipeline_fc_load_file_ipv4(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
+int
+app_pipeline_fc_load_file_ipv6(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
extern struct pipeline_type pipeline_flow_classification;
#endif
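
The three loaders share the signature above: keys/port_ids/flow_ids are caller-provided arrays, *n_keys appears to be in/out (capacity on entry, parsed count on return, bounded by APP_PIPELINE_FC_MAX_FLOWS_IN_FILE), and *line reports where parsing stopped on error. A hedged usage sketch under that assumption (file name and array sizes illustrative):

	struct pipeline_fc_key keys[1024];
	uint32_t port_ids[1024], flow_ids[1024];
	uint32_t n_keys = RTE_DIM(keys), line = 0;

	if (app_pipeline_fc_load_file_ipv4("flows.txt", keys, port_ids,
		flow_ids, &n_keys, &line))
		printf("flow file parse error at line %u\n", line);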
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
index 70d976d5..8a762bc7 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -643,27 +643,6 @@ pipeline_fc_free(void *pipeline)
}
static int
-pipeline_fc_track(void *pipeline,
- __rte_unused uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_in == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-static int
pipeline_fc_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
@@ -807,5 +786,4 @@ struct pipeline_be_ops pipeline_flow_classification_be_ops = {
.f_free = pipeline_fc_free,
.f_run = NULL,
.f_timer = pipeline_fc_timer,
- .f_track = pipeline_fc_track,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_master.c b/examples/ip_pipeline/pipeline/pipeline_master.c
index 1ccdad14..aab58a27 100644
--- a/examples/ip_pipeline/pipeline/pipeline_master.c
+++ b/examples/ip_pipeline/pipeline/pipeline_master.c
@@ -36,7 +36,9 @@
static struct pipeline_fe_ops pipeline_master_fe_ops = {
.f_init = NULL,
+ .f_post_init = NULL,
.f_free = NULL,
+ .f_track = NULL,
.cmds = NULL,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_master_be.c b/examples/ip_pipeline/pipeline/pipeline_master_be.c
index ac0cbbc5..9a7c8c13 100644
--- a/examples/ip_pipeline/pipeline/pipeline_master_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_master_be.c
@@ -48,6 +48,7 @@
struct pipeline_master {
struct app_params *app;
struct cmdline *cl;
+ int post_init_done;
int script_file_done;
} __rte_cache_aligned;
@@ -77,6 +78,7 @@ pipeline_init(__rte_unused struct pipeline_params *params, void *arg)
return NULL;
}
+ p->post_init_done = 0;
p->script_file_done = 0;
if (app->script_file == NULL)
p->script_file_done = 1;
@@ -102,8 +104,20 @@ static int
pipeline_run(void *pipeline)
{
struct pipeline_master *p = (struct pipeline_master *) pipeline;
+ struct app_params *app = p->app;
int status;
+#ifdef RTE_LIBRTE_KNI
+ uint32_t i;
+#endif /* RTE_LIBRTE_KNI */
+ /* Application post-init phase */
+ if (p->post_init_done == 0) {
+ app_post_init(app);
+
+ p->post_init_done = 1;
+ }
+
+ /* Run startup script file */
if (p->script_file_done == 0) {
struct app_params *app = p->app;
int fd = open(app->script_file, O_RDONLY);
@@ -124,6 +138,7 @@ pipeline_run(void *pipeline)
p->script_file_done = 1;
}
+ /* Command Line Interface (CLI) */
status = cmdline_poll(p->cl);
if (status < 0)
rte_panic("CLI poll error (%" PRId32 ")\n", status);
@@ -132,6 +147,12 @@ pipeline_run(void *pipeline)
rte_exit(0, "Bye!\n");
}
+#ifdef RTE_LIBRTE_KNI
+ /* Handle KNI requests from Linux kernel */
+ for (i = 0; i < app->n_pktq_kni; i++)
+ rte_kni_handle_request(app->kni[i]);
+#endif /* RTE_LIBRTE_KNI */
+
return 0;
}
@@ -146,5 +167,4 @@ struct pipeline_be_ops pipeline_master_be_ops = {
.f_free = pipeline_free,
.f_run = pipeline_run,
.f_timer = pipeline_timer,
- .f_track = NULL,
};
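
The KNI loop added to pipeline_run() is what lets kernel-initiated control operations (interface up/down, MTU change) on KNI interfaces complete: rte_kni_handle_request() is non-blocking and must be polled periodically from userspace or those requests stall. The same idiom in isolation (the array and count parameters stand in for app->kni / app->n_pktq_kni):

#include <rte_kni.h>

static void
poll_kni_requests(struct rte_kni **kni, uint32_t n_kni)
{
	uint32_t i;

	/* Non-blocking; returns immediately if no request is pending */
	for (i = 0; i < n_kni; i++)
		rte_kni_handle_request(kni[i]);
}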
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough.c b/examples/ip_pipeline/pipeline/pipeline_passthrough.c
index fc2cae5e..63ce1472 100644
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough.c
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough.c
@@ -34,9 +34,36 @@
#include "pipeline_passthrough.h"
#include "pipeline_passthrough_be.h"
+static int
+app_pipeline_passthrough_track(struct pipeline_params *p,
+ uint32_t port_in,
+ uint32_t *port_out)
+{
+ struct pipeline_passthrough_params pp;
+ int status;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ status = pipeline_passthrough_parse_args(&pp, p);
+ if (status)
+ return -1;
+
+ if (pp.lb_hash_enabled)
+ return -1;
+
+ *port_out = port_in / (p->n_ports_in / p->n_ports_out);
+ return 0;
+}
+
static struct pipeline_fe_ops pipeline_passthrough_fe_ops = {
.f_init = NULL,
+ .f_post_init = NULL,
.f_free = NULL,
+ .f_track = app_pipeline_passthrough_track,
.cmds = NULL,
};
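
With the lb hash disabled, the tracking callback above maps input ports onto output ports in equal contiguous groups; the parse-args check added below guarantees n_ports_in is a non-zero multiple of n_ports_out, so the division is safe. A worked example of the formula:

	/* n_ports_in = 4, n_ports_out = 2:
	 * port_out = port_in / (4 / 2)
	 * in 0 -> out 0, in 1 -> out 0, in 2 -> out 1, in 3 -> out 1
	 */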
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
index a0d11aea..6146a28f 100644
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
@@ -547,6 +547,18 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
"dma_src_mask", dma_mask_str);
}
+ if (p->lb_hash_enabled)
+ PIPELINE_ARG_CHECK((params->n_ports_out > 1),
+ "Parse error in section \"%s\": entry \"lb\" not "
+ "allowed for single output port pipeline",
+ params->name);
+ else
+ PIPELINE_ARG_CHECK(((params->n_ports_in >= params->n_ports_out)
+ && ((params->n_ports_in % params->n_ports_out) == 0)),
+ "Parse error in section \"%s\": n_ports_in needs to be "
+ "a multiple of n_ports_out (lb mode disabled)",
+ params->name);
+
return 0;
}
@@ -579,9 +591,7 @@ pipeline_passthrough_init(struct pipeline_params *params,
/* Check input arguments */
if ((params == NULL) ||
(params->n_ports_in == 0) ||
- (params->n_ports_out == 0) ||
- (params->n_ports_in < params->n_ports_out) ||
- (params->n_ports_in % params->n_ports_out))
+ (params->n_ports_out == 0))
return NULL;
/* Memory allocation */
@@ -702,10 +712,13 @@ pipeline_passthrough_init(struct pipeline_params *params,
/* Add entries to tables */
for (i = 0; i < p->n_ports_in; i++) {
+ uint32_t port_out_id = (p_pt->params.lb_hash_enabled == 0) ?
+ (i / (p->n_ports_in / p->n_ports_out)) :
+ 0;
+
struct rte_pipeline_table_entry default_entry = {
.action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[
- i / (p->n_ports_in / p->n_ports_out)]},
+ {.port_id = p->port_out_id[port_out_id]},
};
struct rte_pipeline_table_entry *default_entry_ptr;
@@ -780,25 +793,9 @@ pipeline_passthrough_timer(void *pipeline)
return 0;
}
-static int
-pipeline_passthrough_track(void *pipeline, uint32_t port_in, uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- *port_out = port_in / p->n_ports_in;
- return 0;
-}
-
struct pipeline_be_ops pipeline_passthrough_be_ops = {
.f_init = pipeline_passthrough_init,
.f_free = pipeline_passthrough_free,
.f_run = NULL,
.f_timer = pipeline_passthrough_timer,
- .f_track = pipeline_passthrough_track,
};
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.c b/examples/ip_pipeline/pipeline/pipeline_routing.c
index eab89f2e..3aadbf91 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,12 +34,11 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
#include "app.h"
#include "pipeline_common_fe.h"
#include "pipeline_routing.h"
+#include "parser.h"
struct app_pipeline_routing_route {
struct pipeline_routing_route_key key;
@@ -59,8 +58,14 @@ struct app_pipeline_routing_arp_entry {
struct pipeline_routing {
/* Parameters */
+ struct app_params *app;
+ uint32_t pipeline_id;
uint32_t n_ports_in;
uint32_t n_ports_out;
+ struct pipeline_routing_params rp;
+
+ /* Links */
+ uint32_t link_id[PIPELINE_MAX_PORT_OUT];
/* Routes */
TAILQ_HEAD(, app_pipeline_routing_route) routes;
@@ -79,12 +84,151 @@ struct pipeline_routing {
void *default_arp_entry_ptr;
};
+static int
+app_pipeline_routing_find_link(struct pipeline_routing *p,
+ uint32_t link_id,
+ uint32_t *port_id)
+{
+ uint32_t i;
+
+ for (i = 0; i < p->n_ports_out; i++)
+ if (p->link_id[i] == link_id) {
+ *port_id = i;
+ return 0;
+ }
+
+ return -1;
+}
+
+static void
+app_pipeline_routing_link_op(__rte_unused struct app_params *app,
+ uint32_t link_id,
+ uint32_t up,
+ void *arg)
+{
+ struct pipeline_routing_route_key key0, key1;
+ struct pipeline_routing *p = arg;
+ struct app_link_params *lp;
+ uint32_t port_id, netmask;
+ int status;
+
+ if (app == NULL)
+ return;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, lp);
+ if (lp == NULL)
+ return;
+
+ status = app_pipeline_routing_find_link(p,
+ link_id,
+ &port_id);
+ if (status)
+ return;
+
+ netmask = (~0U) << (32 - lp->depth);
+
+ /* Local network (directly attached network) */
+ key0.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key0.key.ipv4.ip = lp->ip & netmask;
+ key0.key.ipv4.depth = lp->depth;
+
+ /* Local termination */
+ key1.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key1.key.ipv4.ip = lp->ip;
+ key1.key.ipv4.depth = 32;
+
+ if (up) {
+ struct pipeline_routing_route_data data0, data1;
+
+ /* Local network (directly attached network) */
+ memset(&data0, 0, sizeof(data0));
+ data0.flags = PIPELINE_ROUTING_ROUTE_LOCAL |
+ PIPELINE_ROUTING_ROUTE_ARP;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ)
+ data0.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) {
+ data0.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ data0.l2.mpls.n_labels = 1;
+ }
+ data0.port_id = port_id;
+
+ if (p->rp.n_arp_entries)
+ app_pipeline_routing_add_route(app,
+ p->pipeline_id,
+ &key0,
+ &data0);
+
+ /* Local termination */
+ memset(&data1, 0, sizeof(data1));
+ data1.flags = PIPELINE_ROUTING_ROUTE_LOCAL |
+ PIPELINE_ROUTING_ROUTE_ARP;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ)
+ data1.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) {
+ data1.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ data1.l2.mpls.n_labels = 1;
+ }
+ data1.port_id = p->rp.port_local_dest;
+
+ app_pipeline_routing_add_route(app,
+ p->pipeline_id,
+ &key1,
+ &data1);
+ } else {
+ /* Local network (directly attached network) */
+ if (p->rp.n_arp_entries)
+ app_pipeline_routing_delete_route(app,
+ p->pipeline_id,
+ &key0);
+
+ /* Local termination */
+ app_pipeline_routing_delete_route(app,
+ p->pipeline_id,
+ &key1);
+ }
+}
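
On link up the callback installs two IPv4 routes derived from the link address, and removes them again on link down. Worked example: for lp->ip = 10.0.0.5 with lp->depth = 24, netmask = (~0U) << (32 - 24) = 0xffffff00, so key0 (directly attached network) becomes 10.0.0.0/24 while key1 (local termination) becomes 10.0.0.5/32; key1 traffic is steered to rp.port_local_dest rather than out the link.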
+
+static int
+app_pipeline_routing_set_link_op(
+ struct app_params *app,
+ struct pipeline_routing *p)
+{
+ uint32_t port_id;
+
+ for (port_id = 0; port_id < p->n_ports_out; port_id++) {
+ struct app_link_params *link;
+ uint32_t link_id;
+ int status;
+
+ link = app_pipeline_track_pktq_out_to_link(app,
+ p->pipeline_id,
+ port_id);
+ if (link == NULL)
+ continue;
+
+ link_id = link - app->link_params;
+ p->link_id[port_id] = link_id;
+
+ status = app_link_set_op(app,
+ link_id,
+ p->pipeline_id,
+ app_pipeline_routing_link_op,
+ (void *) p);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
static void *
-pipeline_routing_init(struct pipeline_params *params,
- __rte_unused void *arg)
+app_pipeline_routing_init(struct pipeline_params *params,
+ void *arg)
{
+ struct app_params *app = (struct app_params *) arg;
struct pipeline_routing *p;
- uint32_t size;
+ uint32_t pipeline_id, size;
+ int status;
/* Check input arguments */
if ((params == NULL) ||
@@ -92,6 +236,8 @@ pipeline_routing_init(struct pipeline_params *params,
(params->n_ports_out == 0))
return NULL;
+ APP_PARAM_GET_ID(params, "PIPELINE", pipeline_id);
+
/* Memory allocation */
size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_routing));
p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
@@ -99,19 +245,40 @@ pipeline_routing_init(struct pipeline_params *params,
return NULL;
/* Initialization */
+ p->app = app;
+ p->pipeline_id = pipeline_id;
p->n_ports_in = params->n_ports_in;
p->n_ports_out = params->n_ports_out;
+ status = pipeline_routing_parse_args(&p->rp, params);
+ if (status) {
+ rte_free(p);
+ return NULL;
+ }
TAILQ_INIT(&p->routes);
p->n_routes = 0;
TAILQ_INIT(&p->arp_entries);
p->n_arp_entries = 0;
+ app_pipeline_routing_set_link_op(app, p);
+
return p;
}
static int
+app_pipeline_routing_post_init(void *pipeline)
+{
+ struct pipeline_routing *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ return app_pipeline_routing_set_macaddr(p->app, p->pipeline_id);
+}
+
+static int
app_pipeline_routing_free(void *pipeline)
{
struct pipeline_routing *p = pipeline;
@@ -198,7 +365,9 @@ print_route(const struct app_pipeline_routing_route *route)
key->depth,
route->data.port_id);
- if (route->data.flags & PIPELINE_ROUTING_ROUTE_ARP)
+ if (route->data.flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ printf(", Local");
+ else if (route->data.flags & PIPELINE_ROUTING_ROUTE_ARP)
printf(
", Next Hop IP = %" PRIu32 ".%" PRIu32
".%" PRIu32 ".%" PRIu32,
@@ -383,8 +552,6 @@ app_pipeline_routing_add_route(struct app_params *app,
p->n_routes++;
}
- print_route(entry);
-
/* Message buffer free */
app_msg_free(app, rsp);
return 0;
@@ -677,8 +844,6 @@ app_pipeline_routing_add_arp_entry(struct app_params *app, uint32_t pipeline_id,
p->n_arp_entries++;
}
- print_arp_entry(entry);
-
/* Message buffer free */
app_msg_free(app, rsp);
return 0;
@@ -853,1382 +1018,600 @@ app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
return 0;
}
-static int
-parse_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+int
+app_pipeline_routing_set_macaddr(struct app_params *app,
+ uint32_t pipeline_id)
{
- uint32_t n_max_labels = *n_labels, count = 0;
-
- /* Check for void list of labels */
- if (strcmp(string, "<void>") == 0) {
- *n_labels = 0;
- return 0;
- }
+ struct app_pipeline_params *p;
+ struct pipeline_routing_set_macaddr_msg_req *req;
+ struct pipeline_routing_set_macaddr_msg_rsp *rsp;
+ uint32_t port_id;
- /* At least one label should be present */
- for ( ; (*string != '\0'); ) {
- char *next;
- int value;
+ /* Check input arguments */
+ if (app == NULL)
+ return -EINVAL;
- if (count >= n_max_labels)
- return -1;
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -EINVAL;
- if (count > 0) {
- if (string[0] != ':')
- return -1;
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -ENOMEM;
- string++;
- }
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_SET_MACADDR;
- value = strtol(string, &next, 10);
- if (next == string)
- return -1;
- string = next;
+ memset(req->macaddr, 0, sizeof(req->macaddr));
+ for (port_id = 0; port_id < p->n_pktq_out; port_id++) {
+ struct app_link_params *link;
- labels[count++] = (uint32_t) value;
+ link = app_pipeline_track_pktq_out_to_link(app,
+ pipeline_id,
+ port_id);
+ if (link)
+ req->macaddr[port_id] = link->mac_addr;
}
- *n_labels = count;
- return 0;
-}
-
-/*
- * route add (mpls = no, qinq = no, arp = no)
- */
-
-struct cmd_route_add1_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- struct ether_addr macaddr;
-};
-
-static void
-cmd_route_add1_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add1_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- int status;
-
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = 0;
- route_data.port_id = params->port;
- route_data.ethernet.macaddr = params->macaddr;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -ETIMEDOUT;
- if (status != 0) {
- printf("Command failed\n");
- return;
+ /* Read response and write entry */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return rsp->status;
}
-}
-
-static cmdline_parse_token_string_t cmd_route_add1_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add1_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, p, UINT32);
-static cmdline_parse_token_string_t cmd_route_add1_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add1_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add1_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add1_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_add1_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, depth, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add1_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, port_string,
- "port");
-
-static cmdline_parse_token_num_t cmd_route_add1_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add1_result, port, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add1_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add1_result, ether_string,
- "ether");
-
-static cmdline_parse_token_etheraddr_t cmd_route_add1_macaddr =
- TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add1_result, macaddr);
-
-static cmdline_parse_inst_t cmd_route_add1 = {
- .f = cmd_route_add1_parsed,
- .data = NULL,
- .help_str = "Route add (mpls = no, qinq = no, arp = no)",
- .tokens = {
- (void *)&cmd_route_add1_p_string,
- (void *)&cmd_route_add1_p,
- (void *)&cmd_route_add1_route_string,
- (void *)&cmd_route_add1_add_string,
- (void *)&cmd_route_add1_ip,
- (void *)&cmd_route_add1_depth,
- (void *)&cmd_route_add1_port_string,
- (void *)&cmd_route_add1_port,
- (void *)&cmd_route_add1_ether_string,
- (void *)&cmd_route_add1_macaddr,
- NULL,
- },
-};
-
-/*
- * route add (mpls = no, qinq = no, arp = yes)
- */
-
-struct cmd_route_add2_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- cmdline_ipaddr_t nh_ip;
-};
-
-static void
-cmd_route_add2_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add2_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- int status;
-
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = PIPELINE_ROUTING_ROUTE_ARP;
- route_data.port_id = params->port;
- route_data.ethernet.ip =
- rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
+ /* Free response */
+ app_msg_free(app, rsp);
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
+ return 0;
}
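
app_pipeline_routing_set_macaddr() is a straight instance of the ip_pipeline front-end-to-back-end messaging pattern used throughout this patch: allocate a request, fill type/subtype plus payload, send and block (with timeout) for the response, read the status, free the response. Skeleton of the pattern, with the payload elided:

	struct pipeline_routing_set_macaddr_msg_req *req;
	struct pipeline_routing_set_macaddr_msg_rsp *rsp;
	int status;

	req = app_msg_alloc(app);          /* allocate request buffer */
	if (req == NULL)
		return -ENOMEM;

	req->type = PIPELINE_MSG_REQ_CUSTOM;
	req->subtype = PIPELINE_ROUTING_MSG_REQ_SET_MACADDR;
	/* ... fill message payload here ... */

	rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
	if (rsp == NULL)                   /* blocking send with timeout */
		return -ETIMEDOUT;

	status = rsp->status;              /* read result, then free rsp */
	app_msg_free(app, rsp);
	return status;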
-static cmdline_parse_token_string_t cmd_route_add2_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add2_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add2_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add2_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add2_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add2_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_add2_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, depth, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add2_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, port_string,
- "port");
-
-static cmdline_parse_token_num_t cmd_route_add2_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add2_result, port, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add2_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add2_result, ether_string,
- "ether");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add2_nh_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add2_result, nh_ip);
-
-static cmdline_parse_inst_t cmd_route_add2 = {
- .f = cmd_route_add2_parsed,
- .data = NULL,
- .help_str = "Route add (mpls = no, qinq = no, arp = yes)",
- .tokens = {
- (void *)&cmd_route_add2_p_string,
- (void *)&cmd_route_add2_p,
- (void *)&cmd_route_add2_route_string,
- (void *)&cmd_route_add2_add_string,
- (void *)&cmd_route_add2_ip,
- (void *)&cmd_route_add2_depth,
- (void *)&cmd_route_add2_port_string,
- (void *)&cmd_route_add2_port,
- (void *)&cmd_route_add2_ether_string,
- (void *)&cmd_route_add2_nh_ip,
- NULL,
- },
-};
-
/*
- * route add (mpls = no, qinq = yes, arp = no)
+ * route
+ *
+ * route add (ARP = ON/OFF, MPLS = ON/OFF, QINQ = ON/OFF):
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr> qinq <svlan> <cvlan>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr> qinq <svlan> <cvlan>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr> mpls <mpls labels>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr> mpls <mpls labels>
+ *
+ * route add default:
+ * p <pipelineid> route add default <portid>
+ *
+ * route del:
+ * p <pipelineid> route del <ipaddr> <depth>
+ *
+ * route del default:
+ * p <pipelineid> route del default
+ *
+ * route ls:
+ * p <pipelineid> route ls
*/
-struct cmd_route_add3_result {
+struct cmd_route_result {
cmdline_fixed_string_t p_string;
uint32_t p;
cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- struct ether_addr macaddr;
- cmdline_fixed_string_t qinq_string;
- uint32_t svlan;
- uint32_t cvlan;
+ cmdline_multi_string_t multi_string;
};
static void
-cmd_route_add3_parsed(
+cmd_route_parsed(
void *parsed_result,
__rte_unused struct cmdline *cl,
void *data)
{
- struct cmd_route_add3_result *params = parsed_result;
+ struct cmd_route_result *params = parsed_result;
struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- int status;
-
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = PIPELINE_ROUTING_ROUTE_QINQ;
- route_data.port_id = params->port;
- route_data.ethernet.macaddr = params->macaddr;
- route_data.l2.qinq.svlan = params->svlan;
- route_data.l2.qinq.cvlan = params->cvlan;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
-
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
-}
-static cmdline_parse_token_string_t cmd_route_add3_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add3_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add3_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add3_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add3_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add3_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_add3_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, depth, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add3_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, port_string,
- "port");
-
-static cmdline_parse_token_num_t cmd_route_add3_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, port, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add3_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, ether_string,
- "ether");
-
-static cmdline_parse_token_etheraddr_t cmd_route_add3_macaddr =
- TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add3_result, macaddr);
-
-static cmdline_parse_token_string_t cmd_route_add3_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add3_result, qinq_string,
- "qinq");
-
-static cmdline_parse_token_num_t cmd_route_add3_svlan =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, svlan, UINT32);
-
-static cmdline_parse_token_num_t cmd_route_add3_cvlan =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add3_result, cvlan, UINT32);
-
-static cmdline_parse_inst_t cmd_route_add3 = {
- .f = cmd_route_add3_parsed,
- .data = NULL,
- .help_str = "Route add (qinq = yes, arp = no)",
- .tokens = {
- (void *)&cmd_route_add3_p_string,
- (void *)&cmd_route_add3_p,
- (void *)&cmd_route_add3_route_string,
- (void *)&cmd_route_add3_add_string,
- (void *)&cmd_route_add3_ip,
- (void *)&cmd_route_add3_depth,
- (void *)&cmd_route_add3_port_string,
- (void *)&cmd_route_add3_port,
- (void *)&cmd_route_add3_ether_string,
- (void *)&cmd_route_add3_macaddr,
- (void *)&cmd_route_add3_qinq_string,
- (void *)&cmd_route_add3_svlan,
- (void *)&cmd_route_add3_cvlan,
- NULL,
- },
-};
-
-/*
- * route add (mpls = no, qinq = yes, arp = yes)
- */
-
-struct cmd_route_add4_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- cmdline_ipaddr_t nh_ip;
- cmdline_fixed_string_t qinq_string;
- uint32_t svlan;
- uint32_t cvlan;
-};
-
-static void
-cmd_route_add4_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add4_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
int status;
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = PIPELINE_ROUTING_ROUTE_QINQ |
- PIPELINE_ROUTING_ROUTE_ARP;
- route_data.port_id = params->port;
- route_data.ethernet.ip =
- rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
- route_data.l2.qinq.svlan = params->svlan;
- route_data.l2.qinq.cvlan = params->cvlan;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
-
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
if (status != 0) {
- printf("Command failed\n");
+ printf(CMD_MSG_TOO_MANY_ARGS, "route");
return;
}
-}
-
-static cmdline_parse_token_string_t cmd_route_add4_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add4_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, p, UINT32);
-static cmdline_parse_token_string_t cmd_route_add4_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add4_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add4_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add4_result, ip);
+ /* route add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ struct in_addr ipv4, nh_ipv4;
+ struct ether_addr mac_addr;
+ uint32_t depth, port_id, svlan, cvlan, i;
+ uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
+ uint32_t n_labels = RTE_DIM(mpls_labels);
+
+ memset(&key, 0, sizeof(key));
+ memset(&route_data, 0, sizeof(route_data));
+
+ if (n_tokens < 7) {
+ printf(CMD_MSG_NOT_ENOUGH_ARGS, "route add");
+ return;
+ }
-static cmdline_parse_token_num_t cmd_route_add4_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, depth, UINT32);
+ if (parse_ipv4_addr(tokens[1], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
-static cmdline_parse_token_string_t cmd_route_add4_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, port_string,
- "port");
+ if (parser_read_uint32(&depth, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
-static cmdline_parse_token_num_t cmd_route_add4_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, port, UINT32);
+ if (strcmp(tokens[3], "port")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
-static cmdline_parse_token_string_t cmd_route_add4_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, ether_string,
- "ether");
+ if (parser_read_uint32(&port_id, tokens[4])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-static cmdline_parse_token_ipaddr_t cmd_route_add4_nh_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add4_result, nh_ip);
+ if (strcmp(tokens[5], "ether")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "ether");
+ return;
+ }
-static cmdline_parse_token_string_t cmd_route_add4_qinq_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add4_result, qinq_string,
- "qinq");
+ if (parse_mac_addr(tokens[6], &mac_addr)) {
+ if (parse_ipv4_addr(tokens[6], &nh_ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "nhmacaddr or nhipaddr");
+ return;
+ }
-static cmdline_parse_token_num_t cmd_route_add4_svlan =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, svlan, UINT32);
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_ARP;
+ }
-static cmdline_parse_token_num_t cmd_route_add4_cvlan =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add4_result, cvlan, UINT32);
+ if (n_tokens > 7) {
+ if (strcmp(tokens[7], "mpls") == 0) {
+ if (n_tokens != 9) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add mpls");
+ return;
+ }
+
+ if (parse_mpls_labels(tokens[8], mpls_labels, &n_labels)) {
+ printf(CMD_MSG_INVALID_ARG, "mpls labels");
+ return;
+ }
+
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ } else if (strcmp(tokens[7], "qinq") == 0) {
+ if (n_tokens != 10) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add qinq");
+ return;
+ }
+
+ if (parser_read_uint32(&svlan, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
+ if (parser_read_uint32(&cvlan, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
+
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ } else {
+ printf(CMD_MSG_ARG_NOT_FOUND, "mpls or qinq");
+ return;
+ }
+ }
-static cmdline_parse_inst_t cmd_route_add4 = {
- .f = cmd_route_add4_parsed,
- .data = NULL,
- .help_str = "Route add (qinq = yes, arp = yes)",
- .tokens = {
- (void *)&cmd_route_add4_p_string,
- (void *)&cmd_route_add4_p,
- (void *)&cmd_route_add4_route_string,
- (void *)&cmd_route_add4_add_string,
- (void *)&cmd_route_add4_ip,
- (void *)&cmd_route_add4_depth,
- (void *)&cmd_route_add4_port_string,
- (void *)&cmd_route_add4_port,
- (void *)&cmd_route_add4_ether_string,
- (void *)&cmd_route_add4_nh_ip,
- (void *)&cmd_route_add4_qinq_string,
- (void *)&cmd_route_add4_svlan,
- (void *)&cmd_route_add4_cvlan,
- NULL,
- },
-};
+ switch (route_data.flags) {
+ case 0:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ break;
-/*
- * route add (mpls = yes, qinq = no, arp = no)
- */
+ case PIPELINE_ROUTING_ROUTE_ARP:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ break;
-struct cmd_route_add5_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- struct ether_addr macaddr;
- cmdline_fixed_string_t mpls_string;
- cmdline_fixed_string_t mpls_labels;
-};
+ case PIPELINE_ROUTING_ROUTE_MPLS:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ for (i = 0; i < n_labels; i++)
+ route_data.l2.mpls.labels[i] = mpls_labels[i];
+ route_data.l2.mpls.n_labels = n_labels;
+ break;
-static void
-cmd_route_add5_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add5_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
- uint32_t n_labels = RTE_DIM(mpls_labels);
- uint32_t i;
- int status;
+ case PIPELINE_ROUTING_ROUTE_MPLS | PIPELINE_ROUTING_ROUTE_ARP:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ for (i = 0; i < n_labels; i++)
+ route_data.l2.mpls.labels[i] = mpls_labels[i];
+ route_data.l2.mpls.n_labels = n_labels;
+ break;
- /* Parse MPLS labels */
- status = parse_labels(params->mpls_labels, mpls_labels, &n_labels);
- if (status) {
- printf("MPLS labels parse error\n");
- return;
- }
+ case PIPELINE_ROUTING_ROUTE_QINQ:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ route_data.l2.qinq.svlan = svlan;
+ route_data.l2.qinq.cvlan = cvlan;
+ break;
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
+ case PIPELINE_ROUTING_ROUTE_QINQ | PIPELINE_ROUTING_ROUTE_ARP:
+ default:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ route_data.l2.qinq.svlan = svlan;
+ route_data.l2.qinq.cvlan = cvlan;
+ break;
+ }
- route_data.flags = PIPELINE_ROUTING_ROUTE_MPLS;
- route_data.port_id = params->port;
- route_data.ethernet.macaddr = params->macaddr;
- for (i = 0; i < n_labels; i++)
- route_data.l2.mpls.labels[i] = mpls_labels[i];
- route_data.l2.mpls.n_labels = n_labels;
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.depth = depth;
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route add");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_add5_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add5_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add5_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add5_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add5_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add5_result, ip);
+ } /* route add */
-static cmdline_parse_token_num_t cmd_route_add5_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, depth, UINT32);
+ /* route add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
-static cmdline_parse_token_string_t cmd_route_add5_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, port_string,
- "port");
-
-static cmdline_parse_token_num_t cmd_route_add5_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add5_result, port, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add5_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, ether_string,
- "ether");
-
-static cmdline_parse_token_etheraddr_t cmd_route_add5_macaddr =
- TOKEN_ETHERADDR_INITIALIZER(struct cmd_route_add5_result, macaddr);
-
-static cmdline_parse_token_string_t cmd_route_add5_mpls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, mpls_string,
- "mpls");
-
-static cmdline_parse_token_string_t cmd_route_add5_mpls_labels =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add5_result, mpls_labels,
- NULL);
-
-static cmdline_parse_inst_t cmd_route_add5 = {
- .f = cmd_route_add5_parsed,
- .data = NULL,
- .help_str = "Route add (mpls = yes, arp = no)",
- .tokens = {
- (void *)&cmd_route_add5_p_string,
- (void *)&cmd_route_add5_p,
- (void *)&cmd_route_add5_route_string,
- (void *)&cmd_route_add5_add_string,
- (void *)&cmd_route_add5_ip,
- (void *)&cmd_route_add5_depth,
- (void *)&cmd_route_add5_port_string,
- (void *)&cmd_route_add5_port,
- (void *)&cmd_route_add5_ether_string,
- (void *)&cmd_route_add5_macaddr,
- (void *)&cmd_route_add5_mpls_string,
- (void *)&cmd_route_add5_mpls_labels,
- NULL,
- },
-};
-
-/*
- * route add (mpls = yes, qinq = no, arp = yes)
- */
-
-struct cmd_route_add6_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
- cmdline_fixed_string_t port_string;
- uint32_t port;
- cmdline_fixed_string_t ether_string;
- cmdline_ipaddr_t nh_ip;
- cmdline_fixed_string_t mpls_string;
- cmdline_fixed_string_t mpls_labels;
-};
-
-static void
-cmd_route_add6_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add6_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
- uint32_t n_labels = RTE_DIM(mpls_labels);
- uint32_t i;
- int status;
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add default");
+ return;
+ }
- /* Parse MPLS labels */
- status = parse_labels(params->mpls_labels, mpls_labels, &n_labels);
- if (status) {
- printf("MPLS labels parse error\n");
- return;
- }
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
-
- route_data.flags = PIPELINE_ROUTING_ROUTE_MPLS |
- PIPELINE_ROUTING_ROUTE_ARP;
- route_data.port_id = params->port;
- route_data.ethernet.ip =
- rte_bswap32((uint32_t) params->nh_ip.addr.ipv4.s_addr);
- for (i = 0; i < n_labels; i++)
- route_data.l2.mpls.labels[i] = mpls_labels[i];
- route_data.l2.mpls.n_labels = n_labels;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
+ status = app_pipeline_routing_add_default_route(app,
+ params->p,
+ port_id);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route add default");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_add6_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_add6_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add6_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_add6_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, add_string,
- "add");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add6_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add6_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_add6_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, depth, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_add6_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, port_string,
- "port");
+ } /* route add default */
-static cmdline_parse_token_num_t cmd_route_add6_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add6_result, port, UINT32);
+ /* route del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_route_key key;
+ struct in_addr ipv4;
+ uint32_t depth;
-static cmdline_parse_token_string_t cmd_route_add6_ether_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, ether_string,
- "ether");
-
-static cmdline_parse_token_ipaddr_t cmd_route_add6_nh_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_add6_result, nh_ip);
-
-static cmdline_parse_token_string_t cmd_route_add6_mpls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, mpls_string,
- "mpls");
-
-static cmdline_parse_token_string_t cmd_route_add6_mpls_labels =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add6_result, mpls_labels,
- NULL);
-
-static cmdline_parse_inst_t cmd_route_add6 = {
- .f = cmd_route_add6_parsed,
- .data = NULL,
- .help_str = "Route add (mpls = yes, arp = yes)",
- .tokens = {
- (void *)&cmd_route_add6_p_string,
- (void *)&cmd_route_add6_p,
- (void *)&cmd_route_add6_route_string,
- (void *)&cmd_route_add6_add_string,
- (void *)&cmd_route_add6_ip,
- (void *)&cmd_route_add6_depth,
- (void *)&cmd_route_add6_port_string,
- (void *)&cmd_route_add6_port,
- (void *)&cmd_route_add6_ether_string,
- (void *)&cmd_route_add6_nh_ip,
- (void *)&cmd_route_add6_mpls_string,
- (void *)&cmd_route_add6_mpls_labels,
- NULL,
- },
-};
+ memset(&key, 0, sizeof(key));
-/*
- * route del
- */
-
-struct cmd_route_del_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t del_string;
- cmdline_ipaddr_t ip;
- uint32_t depth;
-};
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route del");
+ return;
+ }
-static void
-cmd_route_del_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_del_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing_route_key key;
+ if (parse_ipv4_addr(tokens[1], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
- int status;
+ if (parser_read_uint32(&depth, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
- /* Create route */
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
- key.key.ipv4.depth = params->depth;
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.depth = depth;
- status = app_pipeline_routing_delete_route(app, params->p, &key);
+ status = app_pipeline_routing_delete_route(app, params->p, &key);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route del");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_del_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_del_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_del_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_del_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, route_string,
- "route");
-
-static cmdline_parse_token_string_t cmd_route_del_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_result, del_string,
- "del");
-
-static cmdline_parse_token_ipaddr_t cmd_route_del_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_route_del_result, ip);
-
-static cmdline_parse_token_num_t cmd_route_del_depth =
- TOKEN_NUM_INITIALIZER(struct cmd_route_del_result, depth, UINT32);
-
-static cmdline_parse_inst_t cmd_route_del = {
- .f = cmd_route_del_parsed,
- .data = NULL,
- .help_str = "Route delete",
- .tokens = {
- (void *)&cmd_route_del_p_string,
- (void *)&cmd_route_del_p,
- (void *)&cmd_route_del_route_string,
- (void *)&cmd_route_del_del_string,
- (void *)&cmd_route_del_ip,
- (void *)&cmd_route_del_depth,
- NULL,
- },
-};
-
-/*
- * route add default
- */
-
-struct cmd_route_add_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t default_string;
- uint32_t port;
-};
-
-static void
-cmd_route_add_default_parsed(
- void *parsed_result,
- __attribute__((unused)) struct cmdline *cl,
- void *data)
-{
- struct cmd_route_add_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ } /* route del */
+
+ /* route del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route del default");
+ return;
+ }
- status = app_pipeline_routing_add_default_route(app, params->p,
- params->port);
+ status = app_pipeline_routing_delete_default_route(app,
+ params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route del default");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_add_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result, p_string,
- "p");
+ } /* route del default */
-static cmdline_parse_token_num_t cmd_route_add_default_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add_default_result, p, UINT32);
-
-cmdline_parse_token_string_t cmd_route_add_default_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
- route_string, "route");
-
-cmdline_parse_token_string_t cmd_route_add_default_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
- add_string, "add");
-
-cmdline_parse_token_string_t cmd_route_add_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_add_default_result,
- default_string, "default");
-
-cmdline_parse_token_num_t cmd_route_add_default_port =
- TOKEN_NUM_INITIALIZER(struct cmd_route_add_default_result,
- port, UINT32);
-
-cmdline_parse_inst_t cmd_route_add_default = {
- .f = cmd_route_add_default_parsed,
- .data = NULL,
- .help_str = "Route default set",
- .tokens = {
- (void *)&cmd_route_add_default_p_string,
- (void *)&cmd_route_add_default_p,
- (void *)&cmd_route_add_default_route_string,
- (void *)&cmd_route_add_default_add_string,
- (void *)&cmd_route_add_default_default_string,
- (void *)&cmd_route_add_default_port,
- NULL,
- },
-};
-
-/*
- * route del default
- */
-
-struct cmd_route_del_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t default_string;
-};
-
-static void
-cmd_route_del_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_del_default_result *params = parsed_result;
- struct app_params *app = data;
- int status;
+ /* route ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route ls");
+ return;
+ }
- status = app_pipeline_routing_delete_default_route(app, params->p);
+ status = app_pipeline_routing_route_ls(app, params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route ls");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_route_del_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_route_del_default_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_del_default_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_del_default_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
- route_string, "route");
-
-static cmdline_parse_token_string_t cmd_route_del_default_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
- del_string, "del");
-
-static cmdline_parse_token_string_t cmd_route_del_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_del_default_result,
- default_string, "default");
-
-
-static cmdline_parse_inst_t cmd_route_del_default = {
- .f = cmd_route_del_default_parsed,
- .data = NULL,
- .help_str = "Route default clear",
- .tokens = {
- (void *)&cmd_route_del_default_p_string,
- (void *)&cmd_route_del_default_p,
- (void *)&cmd_route_del_default_route_string,
- (void *)&cmd_route_del_default_del_string,
- (void *)&cmd_route_del_default_default_string,
- NULL,
- },
-};
-
-/*
- * route ls
- */
-
-struct cmd_route_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_fixed_string_t ls_string;
-};
-
-static void
-cmd_route_ls_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_ls_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_routing_route_ls(app, params->p);
+ } /* route ls */
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
+ printf(CMD_MSG_MISMATCH_ARGS, "route");
}
-static cmdline_parse_token_string_t cmd_route_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result, p_string, "p");
+static cmdline_parse_token_string_t cmd_route_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, p_string, "p");
-static cmdline_parse_token_num_t cmd_route_ls_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_ls_result, p, UINT32);
+static cmdline_parse_token_num_t cmd_route_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_result, p, UINT32);
-static cmdline_parse_token_string_t cmd_route_ls_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result,
- route_string, "route");
+static cmdline_parse_token_string_t cmd_route_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, route_string, "route");
-static cmdline_parse_token_string_t cmd_route_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_route_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, multi_string,
+ TOKEN_STRING_MULTI);
-static cmdline_parse_inst_t cmd_route_ls = {
- .f = cmd_route_ls_parsed,
+static cmdline_parse_inst_t cmd_route = {
+ .f = cmd_route_parsed,
.data = NULL,
- .help_str = "Route list",
+ .help_str = "route add / add default / del / del default / ls",
.tokens = {
- (void *)&cmd_route_ls_p_string,
- (void *)&cmd_route_ls_p,
- (void *)&cmd_route_ls_route_string,
- (void *)&cmd_route_ls_ls_string,
+ (void *)&cmd_route_p_string,
+ (void *)&cmd_route_p,
+ (void *)&cmd_route_route_string,
+ (void *)&cmd_route_multi_string,
NULL,
},
};
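
Put together, the consolidated route command accepts lines like the following (addresses, port IDs and labels are illustrative; MPLS labels are colon-separated, as in the old parse_labels()):

	p 1 route add 10.0.0.0 24 port 0 ether a0:b0:c0:d0:e0:f0
	p 1 route add 10.1.0.0 16 port 1 ether 10.1.0.1 mpls 100:200
	p 1 route add default 2
	p 1 route del 10.0.0.0 24
	p 1 route del default
	p 1 route ls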
/*
- * arp add
+ * arp
+ *
+ * arp add:
+ * p <pipelineid> arp add <portid> <ipaddr> <macaddr>
+ *
+ * arp add default:
+ * p <pipelineid> arp add default <portid>
+ *
+ * arp del:
+ * p <pipelineid> arp del <portid> <ipaddr>
+ *
+ * arp del default:
+ * p <pipelineid> arp del default
+ *
+ * arp ls:
+ * p <pipelineid> arp ls
*/
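
Example invocations matching the syntax above (all values illustrative):

	p 1 arp add 0 10.0.0.1 a0:b0:c0:d0:e0:f0
	p 1 arp add default 2
	p 1 arp del 0 10.0.0.1
	p 1 arp del default
	p 1 arp ls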
-struct cmd_arp_add_result {
+struct cmd_arp_result {
cmdline_fixed_string_t p_string;
uint32_t p;
cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t add_string;
- uint32_t port_id;
- cmdline_ipaddr_t ip;
- struct ether_addr macaddr;
-
+ cmdline_multi_string_t multi_string;
};
static void
-cmd_arp_add_parsed(
+cmd_arp_parsed(
void *parsed_result,
__rte_unused struct cmdline *cl,
void *data)
{
- struct cmd_arp_add_result *params = parsed_result;
+ struct cmd_arp_result *params = parsed_result;
struct app_params *app = data;
- struct pipeline_routing_arp_key key;
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
int status;
- key.type = PIPELINE_ROUTING_ARP_IPV4;
- key.key.ipv4.port_id = params->port_id;
- key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
-
- status = app_pipeline_routing_add_arp_entry(app,
- params->p,
- &key,
- &params->macaddr);
-
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
if (status != 0) {
- printf("Command failed\n");
+ printf(CMD_MSG_TOO_MANY_ARGS, "arp");
return;
}
-}
-
-static cmdline_parse_token_string_t cmd_arp_add_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_arp_add_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_add_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, arp_string, "arp");
-static cmdline_parse_token_string_t cmd_arp_add_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, add_string, "add");
+ /* arp add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_arp_key key;
+ struct in_addr ipv4;
+ struct ether_addr mac_addr;
+ uint32_t port_id;
-static cmdline_parse_token_num_t cmd_arp_add_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, port_id, UINT32);
-
-static cmdline_parse_token_ipaddr_t cmd_arp_add_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_arp_add_result, ip);
-
-static cmdline_parse_token_etheraddr_t cmd_arp_add_macaddr =
- TOKEN_ETHERADDR_INITIALIZER(struct cmd_arp_add_result, macaddr);
-
-static cmdline_parse_inst_t cmd_arp_add = {
- .f = cmd_arp_add_parsed,
- .data = NULL,
- .help_str = "ARP add",
- .tokens = {
- (void *)&cmd_arp_add_p_string,
- (void *)&cmd_arp_add_p,
- (void *)&cmd_arp_add_arp_string,
- (void *)&cmd_arp_add_add_string,
- (void *)&cmd_arp_add_port_id,
- (void *)&cmd_arp_add_ip,
- (void *)&cmd_arp_add_macaddr,
- NULL,
- },
-};
+ memset(&key, 0, sizeof(key));
-/*
- * arp del
- */
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp add");
+ return;
+ }
-struct cmd_arp_del_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t del_string;
- uint32_t port_id;
- cmdline_ipaddr_t ip;
-};
+ if (parser_read_uint32(&port_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-static void
-cmd_arp_del_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_del_result *params = parsed_result;
- struct app_params *app = data;
+ if (parse_ipv4_addr(tokens[2], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
- struct pipeline_routing_arp_key key;
- int status;
+ if (parse_mac_addr(tokens[3], &mac_addr)) {
+ printf(CMD_MSG_INVALID_ARG, "macaddr");
+ return;
+ }
- key.type = PIPELINE_ROUTING_ARP_IPV4;
- key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
- key.key.ipv4.port_id = params->port_id;
+ key.type = PIPELINE_ROUTING_ARP_IPV4;
+ key.key.ipv4.port_id = port_id;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
- status = app_pipeline_routing_delete_arp_entry(app, params->p, &key);
+ status = app_pipeline_routing_add_arp_entry(app,
+ params->p,
+ &key,
+ &mac_addr);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp add");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_arp_del_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, p_string,
- "p");
+ } /* arp add */
-static cmdline_parse_token_num_t cmd_arp_del_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_del_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "arp");
-
-static cmdline_parse_token_string_t cmd_arp_del_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, del_string, "del");
-
-static cmdline_parse_token_num_t cmd_arp_del_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, port_id, UINT32);
-
-static cmdline_parse_token_ipaddr_t cmd_arp_del_ip =
- TOKEN_IPV4_INITIALIZER(struct cmd_arp_del_result, ip);
-
-static cmdline_parse_inst_t cmd_arp_del = {
- .f = cmd_arp_del_parsed,
- .data = NULL,
- .help_str = "ARP delete",
- .tokens = {
- (void *)&cmd_arp_del_p_string,
- (void *)&cmd_arp_del_p,
- (void *)&cmd_arp_del_arp_string,
- (void *)&cmd_arp_del_del_string,
- (void *)&cmd_arp_del_port_id,
- (void *)&cmd_arp_del_ip,
- NULL,
- },
-};
-
-/*
- * arp add default
- */
-
-struct cmd_arp_add_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t add_string;
- cmdline_fixed_string_t default_string;
- uint32_t port_id;
-};
+ /* arp add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
-static void
-cmd_arp_add_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_add_default_result *params = parsed_result;
- struct app_params *app = data;
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp add default");
+ return;
+ }
- int status;
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
- status = app_pipeline_routing_add_default_arp_entry(app,
- params->p,
- params->port_id);
+ status = app_pipeline_routing_add_default_arp_entry(app,
+ params->p,
+ port_id);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp add default");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_arp_add_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_arp_add_default_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_add_default_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_add_default_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, arp_string,
- "arp");
-
-static cmdline_parse_token_string_t cmd_arp_add_default_add_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result, add_string,
- "add");
+ } /* arp add default */
-static cmdline_parse_token_string_t cmd_arp_add_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_add_default_result,
- default_string, "default");
+ /* arp del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_arp_key key;
+ struct in_addr ipv4;
+ uint32_t port_id;
-static cmdline_parse_token_num_t cmd_arp_add_default_port_id =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_add_default_result, port_id,
- UINT32);
+ memset(&key, 0, sizeof(key));
-static cmdline_parse_inst_t cmd_arp_add_default = {
- .f = cmd_arp_add_default_parsed,
- .data = NULL,
- .help_str = "ARP add default",
- .tokens = {
- (void *)&cmd_arp_add_default_p_string,
- (void *)&cmd_arp_add_default_p,
- (void *)&cmd_arp_add_default_arp_string,
- (void *)&cmd_arp_add_default_add_string,
- (void *)&cmd_arp_add_default_default_string,
- (void *)&cmd_arp_add_default_port_id,
- NULL,
- },
-};
-
-/*
- * arp del default
- */
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp del");
+ return;
+ }
-struct cmd_arp_del_default_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t del_string;
- cmdline_fixed_string_t default_string;
-};
+ if (parser_read_uint32(&port_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
-static void
-cmd_arp_del_default_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_del_default_result *params = parsed_result;
- struct app_params *app = data;
+ if (parse_ipv4_addr(tokens[2], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
- int status;
+ key.type = PIPELINE_ROUTING_ARP_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.port_id = port_id;
- status = app_pipeline_routing_delete_default_arp_entry(app, params->p);
+ status = app_pipeline_routing_delete_arp_entry(app,
+ params->p,
+ &key);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp del");
- if (status != 0) {
- printf("Command failed\n");
return;
- }
-}
-
-static cmdline_parse_token_string_t cmd_arp_del_default_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_arp_del_default_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_del_default_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_del_default_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, arp_string,
- "arp");
-
-static cmdline_parse_token_string_t cmd_arp_del_default_del_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result, del_string,
- "del");
-
-static cmdline_parse_token_string_t cmd_arp_del_default_default_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_del_default_result,
- default_string, "default");
-
-static cmdline_parse_inst_t cmd_arp_del_default = {
- .f = cmd_arp_del_default_parsed,
- .data = NULL,
- .help_str = "ARP delete default",
- .tokens = {
- (void *)&cmd_arp_del_default_p_string,
- (void *)&cmd_arp_del_default_p,
- (void *)&cmd_arp_del_default_arp_string,
- (void *)&cmd_arp_del_default_del_string,
- (void *)&cmd_arp_del_default_default_string,
- NULL,
- },
-};
+ } /* arp del */
+
+ /* arp del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp del default");
+ return;
+ }
+
+ status = app_pipeline_routing_delete_default_arp_entry(app,
+ params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp del default");
+
+ return;
+ } /* arp del default */
+
+ /* arp ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp ls");
+ return;
+ }
-/*
- * arp ls
- */
+ status = app_pipeline_routing_arp_ls(app, params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp ls");
-struct cmd_arp_ls_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_fixed_string_t ls_string;
-};
-
-static void
-cmd_arp_ls_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_ls_result *params = parsed_result;
- struct app_params *app = data;
- struct pipeline_routing *p;
-
- p = app_pipeline_data_fe(app, params->p, &pipeline_routing);
- if (p == NULL)
return;
+ } /* arp ls */
- app_pipeline_routing_arp_ls(app, params->p);
+ printf(CMD_MSG_FAIL, "arp");
}
-static cmdline_parse_token_string_t cmd_arp_ls_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, p_string,
- "p");
+static cmdline_parse_token_string_t cmd_arp_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, p_string, "p");
-static cmdline_parse_token_num_t cmd_arp_ls_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_ls_result, p, UINT32);
+static cmdline_parse_token_num_t cmd_arp_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_result, p, UINT32);
-static cmdline_parse_token_string_t cmd_arp_ls_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, arp_string,
- "arp");
+static cmdline_parse_token_string_t cmd_arp_arp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, arp_string, "arp");
-static cmdline_parse_token_string_t cmd_arp_ls_ls_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, ls_string,
- "ls");
+static cmdline_parse_token_string_t cmd_arp_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, multi_string,
+ TOKEN_STRING_MULTI);
-static cmdline_parse_inst_t cmd_arp_ls = {
- .f = cmd_arp_ls_parsed,
+static cmdline_parse_inst_t cmd_arp = {
+ .f = cmd_arp_parsed,
.data = NULL,
- .help_str = "ARP list",
+ .help_str = "arp add / add default / del / del default / ls",
.tokens = {
- (void *)&cmd_arp_ls_p_string,
- (void *)&cmd_arp_ls_p,
- (void *)&cmd_arp_ls_arp_string,
- (void *)&cmd_arp_ls_ls_string,
+ (void *)&cmd_arp_p_string,
+ (void *)&cmd_arp_p,
+ (void *)&cmd_arp_arp_string,
+ (void *)&cmd_arp_multi_string,
NULL,
},
};
static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *)&cmd_route_add1,
- (cmdline_parse_inst_t *)&cmd_route_add2,
- (cmdline_parse_inst_t *)&cmd_route_add3,
- (cmdline_parse_inst_t *)&cmd_route_add4,
- (cmdline_parse_inst_t *)&cmd_route_add5,
- (cmdline_parse_inst_t *)&cmd_route_add6,
- (cmdline_parse_inst_t *)&cmd_route_del,
- (cmdline_parse_inst_t *)&cmd_route_add_default,
- (cmdline_parse_inst_t *)&cmd_route_del_default,
- (cmdline_parse_inst_t *)&cmd_route_ls,
- (cmdline_parse_inst_t *)&cmd_arp_add,
- (cmdline_parse_inst_t *)&cmd_arp_del,
- (cmdline_parse_inst_t *)&cmd_arp_add_default,
- (cmdline_parse_inst_t *)&cmd_arp_del_default,
- (cmdline_parse_inst_t *)&cmd_arp_ls,
+ (cmdline_parse_inst_t *)&cmd_route,
+ (cmdline_parse_inst_t *)&cmd_arp,
NULL,
};
static struct pipeline_fe_ops pipeline_routing_fe_ops = {
- .f_init = pipeline_routing_init,
+ .f_init = app_pipeline_routing_init,
+ .f_post_init = app_pipeline_routing_post_init,
.f_free = app_pipeline_routing_free,
+ .f_track = app_pipeline_track_default,
.cmds = pipeline_cmds,
};
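
The hunk above collapses the five per-verb cmdline specs (arp add, arp add default, arp del, arp del default, arp ls) into a single "arp" instance whose trailing TOKEN_STRING_MULTI argument is split by parse_tokenize_string() and dispatched by hand. Below is a minimal, self-contained sketch of that tokenize-and-dispatch pattern; tokenize() and the literal messages are plain-C stand-ins for the application's helpers, not the real API.

#include <stdio.h>
#include <string.h>

#define MAX_TOKENS 16

/* Stand-in for parse_tokenize_string(): split on whitespace, fail when
 * more tokens arrive than the caller's array can hold. */
static int tokenize(char *str, char *tokens[], unsigned int *n_tokens)
{
	unsigned int i = 0;
	char *tok = strtok(str, " \t");

	while (tok != NULL) {
		if (i >= *n_tokens)
			return -1;
		tokens[i++] = tok;
		tok = strtok(NULL, " \t");
	}
	*n_tokens = i;
	return 0;
}

int main(void)
{
	char line[] = "add 0 192.168.0.1 00:11:22:33:44:55";
	char *tokens[MAX_TOKENS];
	unsigned int n_tokens = MAX_TOKENS;

	if (tokenize(line, tokens, &n_tokens) != 0) {
		printf("too many arguments\n");
		return 1;
	}

	/* Dispatch on the leading keyword(s); as in cmd_arp_parsed(),
	 * "add default" must be separated from the plain "add" form. */
	if ((n_tokens == 3) && (strcmp(tokens[0], "add") == 0) &&
		(strcmp(tokens[1], "default") == 0))
		printf("arp add default: port %s\n", tokens[2]);
	else if ((n_tokens == 4) && (strcmp(tokens[0], "add") == 0))
		printf("arp add: port %s ip %s mac %s\n",
			tokens[1], tokens[2], tokens[3]);
	else
		printf("unknown arp subcommand\n");
	return 0;
}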
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.h b/examples/ip_pipeline/pipeline/pipeline_routing.h
index fa41642b..0197449b 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing.h
+++ b/examples/ip_pipeline/pipeline/pipeline_routing.h
@@ -86,6 +86,13 @@ app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
uint32_t pipeline_id);
/*
+ * SETTINGS
+ */
+int
+app_pipeline_routing_set_macaddr(struct app_params *app,
+ uint32_t pipeline_id);
+
+/*
* Pipeline type
*/
extern struct pipeline_type pipeline_routing;
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.c b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
index bc5bf7a5..21ac7888 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
@@ -65,7 +65,9 @@
((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
-#define MAC_SRC_DEFAULT 0x112233445566ULL
+/* Network Byte Order (NBO) */
+#define SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr, ethertype) \
+ (((uint64_t) macaddr) | (((uint64_t) rte_cpu_to_be_16(ethertype)) << 48))
#ifndef PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s
#define PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s 256
@@ -75,6 +77,7 @@ struct pipeline_routing {
struct pipeline p;
struct pipeline_routing_params params;
pipeline_msg_req_handler custom_handlers[PIPELINE_ROUTING_MSG_REQS];
+ uint64_t macaddr[PIPELINE_MAX_PORT_OUT];
} __rte_cache_aligned;
/*
@@ -132,6 +135,10 @@ static void *
pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p,
void *msg);
+static void *
+pipeline_routing_msg_req_set_macaddr_handler(struct pipeline *p,
+ void *msg);
+
static pipeline_msg_req_handler custom_handlers[] = {
[PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD] =
pipeline_routing_msg_req_route_add_handler,
@@ -149,6 +156,8 @@ static pipeline_msg_req_handler custom_handlers[] = {
pipeline_routing_msg_req_arp_add_default_handler,
[PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT] =
pipeline_routing_msg_req_arp_del_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_SET_MACADDR] =
+ pipeline_routing_msg_req_set_macaddr_handler,
};
/*
@@ -921,6 +930,7 @@ pipeline_routing_parse_args(struct pipeline_routing_params *p,
struct pipeline_params *params)
{
uint32_t n_routes_present = 0;
+ uint32_t port_local_dest_present = 0;
uint32_t encap_present = 0;
uint32_t qinq_sched_present = 0;
uint32_t mpls_color_mark_present = 0;
@@ -933,6 +943,7 @@ pipeline_routing_parse_args(struct pipeline_routing_params *p,
/* default values */
p->n_routes = PIPELINE_ROUTING_N_ROUTES_DEFAULT;
+ p->port_local_dest = params->n_ports_out - 1;
p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET;
p->qinq_sched = 0;
p->mpls_color_mark = 0;
@@ -962,6 +973,23 @@ pipeline_routing_parse_args(struct pipeline_routing_params *p,
continue;
}
+ /* port_local_dest */
+ if (strcmp(arg_name, "port_local_dest") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ port_local_dest_present == 0, params->name,
+ arg_name);
+ port_local_dest_present = 1;
+
+ status = parser_read_uint32(&p->port_local_dest,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
+ (p->port_local_dest < params->n_ports_out)),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
/* encap */
if (strcmp(arg_name, "encap") == 0) {
@@ -1419,27 +1447,6 @@ pipeline_routing_free(void *pipeline)
}
static int
-pipeline_routing_track(void *pipeline,
- __rte_unused uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_in == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-static int
pipeline_routing_timer(void *pipeline)
{
struct pipeline *p = (struct pipeline *) pipeline;
@@ -1534,7 +1541,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether - ARP off */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
(p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t macaddr_dst;
uint64_t ethertype = ETHER_TYPE_IPv4;
@@ -1542,7 +1549,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
macaddr_dst = rte_bswap64(macaddr_dst << 16);
entry_arp0.slab[0] =
- rte_bswap64((macaddr_src << 16) | ethertype);
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype);
entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
entry_arp0.slab[1] = rte_bswap64(macaddr_dst);
@@ -1556,11 +1563,11 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether - ARP on */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t ethertype = ETHER_TYPE_IPv4;
- entry_arp1.slab[0] = rte_bswap64((macaddr_src << 16) |
- ethertype);
+ entry_arp1.slab[0] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype);
entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
entry_arp1.data_offset = entry_arp1.slab_offset[0] - 6
@@ -1571,7 +1578,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether QinQ - ARP off */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
(p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t macaddr_dst;
uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
uint64_t ethertype_vlan = 0x8100;
@@ -1588,8 +1595,8 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
ethertype_ipv4);
entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
- entry_arp0.slab[1] = rte_bswap64((macaddr_src << 16) |
- ethertype_qinq);
+ entry_arp0.slab[1] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_qinq);
entry_arp0.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
entry_arp0.slab[2] = rte_bswap64(macaddr_dst);
@@ -1603,7 +1610,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether QinQ - ARP on */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
uint64_t ethertype_vlan = 0x8100;
uint64_t ethertype_qinq = 0x9100;
@@ -1616,8 +1623,8 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
ethertype_ipv4);
entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
- entry_arp1.slab[1] = rte_bswap64((macaddr_src << 16) |
- ethertype_qinq);
+ entry_arp1.slab[1] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_qinq);
entry_arp1.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
entry_arp1.data_offset = entry_arp1.slab_offset[1] - 6
@@ -1628,7 +1635,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether MPLS - ARP off */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
(p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t macaddr_dst;
uint64_t ethertype_mpls = 0x8847;
@@ -1697,8 +1704,8 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
return rsp;
}
- entry_arp0.slab[2] = rte_bswap64((macaddr_src << 16) |
- ethertype_mpls);
+ entry_arp0.slab[2] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_mpls);
entry_arp0.slab_offset[2] = p_rt->params.ip_hdr_offset -
(n_labels * 4 + 8);
@@ -1714,7 +1721,7 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
/* Ether MPLS - ARP on */
if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = MAC_SRC_DEFAULT;
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
uint64_t ethertype_mpls = 0x8847;
uint64_t label0 = req->data.l2.mpls.labels[0];
@@ -1779,8 +1786,8 @@ pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
return rsp;
}
- entry_arp1.slab[2] = rte_bswap64((macaddr_src << 16) |
- ethertype_mpls);
+ entry_arp1.slab[2] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_mpls);
entry_arp1.slab_offset[2] = p_rt->params.ip_hdr_offset -
(n_labels * 4 + 8);
@@ -1961,10 +1968,25 @@ pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p, void *msg)
return rsp;
}
+void *
+pipeline_routing_msg_req_set_macaddr_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
+ struct pipeline_routing_set_macaddr_msg_req *req = msg;
+ struct pipeline_routing_set_macaddr_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ for (port_id = 0; port_id < p->n_ports_out; port_id++)
+ p_rt->macaddr[port_id] = req->macaddr[port_id];
+
+ rsp->status = 0;
+
+ return rsp;
+}
+
struct pipeline_be_ops pipeline_routing_be_ops = {
.f_init = pipeline_routing_init,
.f_free = pipeline_routing_free,
.f_run = NULL,
.f_timer = pipeline_routing_timer,
- .f_track = pipeline_routing_track,
};
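
Two related changes above: the hard-coded MAC_SRC_DEFAULT is replaced by a per-out-port p_rt->macaddr[] filled once through the new SET_MACADDR message, and SLAB_NBO_MACADDRSRC_ETHERTYPE() ORs in the big-endian ethertype instead of byte-swapping the whole slab on every route add. A standalone sketch of that arithmetic, assuming a little-endian host and a MAC value already stored in network byte order in the low six bytes of the uint64_t:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for rte_cpu_to_be_16() on a little-endian host. */
static uint16_t cpu_to_be_16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

/* Same shape as SLAB_NBO_MACADDRSRC_ETHERTYPE(): MAC in slab bytes 0-5,
 * big-endian ethertype in bytes 6-7. */
static uint64_t slab_nbo(uint64_t macaddr_nbo, uint16_t ethertype)
{
	return macaddr_nbo | ((uint64_t)cpu_to_be_16(ethertype) << 48);
}

int main(void)
{
	/* MAC 11:22:33:44:55:66 laid out wire-order in memory */
	uint64_t slab = slab_nbo(0x665544332211ULL, 0x0800 /* IPv4 */);

	printf("slab = 0x%016" PRIx64 "\n", slab);
	return 0;
}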
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.h b/examples/ip_pipeline/pipeline/pipeline_routing_be.h
index ec767b24..12763427 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.h
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.h
@@ -54,6 +54,7 @@ enum pipeline_routing_encap {
struct pipeline_routing_params {
/* routing */
uint32_t n_routes;
+ uint32_t port_local_dest;
/* routing packet encapsulation */
enum pipeline_routing_encap encap;
@@ -160,6 +161,7 @@ enum pipeline_routing_msg_req_type {
PIPELINE_ROUTING_MSG_REQ_ARP_DEL,
PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT,
PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_SET_MACADDR,
PIPELINE_ROUTING_MSG_REQS
};
@@ -291,6 +293,20 @@ struct pipeline_routing_arp_delete_default_msg_rsp {
int status;
};
+/*
+ * MSG SET MACADDR
+ */
+struct pipeline_routing_set_macaddr_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ uint64_t macaddr[PIPELINE_MAX_PORT_OUT];
+};
+
+struct pipeline_routing_set_macaddr_msg_rsp {
+ int status;
+};
+
extern struct pipeline_be_ops pipeline_routing_be_ops;
#endif
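
Like the other message pairs in this header, the new request and response are designed to overlay the same buffer: the backend handler shown earlier reads the req fields and then writes the rsp over them. A standalone sketch of that in-place request/response convention, with simplified stand-in structs (the real ones carry the type/subtype discriminators):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins: request and response share one buffer, so the
 * response must never be larger than the request. */
struct msg_req {
	int type;
	uint64_t payload;
};

struct msg_rsp {
	int status;
};

static void *handler(void *msg)
{
	struct msg_req *req = msg;	/* read the request first */
	struct msg_rsp *rsp = msg;	/* then overwrite it in place */
	uint64_t value = req->payload;

	rsp->status = (value != 0) ? 0 : -1;
	return rsp;
}

int main(void)
{
	struct msg_req buf = { .type = 1, .payload = 42 };
	struct msg_rsp *rsp = handler(&buf);

	printf("status = %d\n", rsp->status);
	return 0;
}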
diff --git a/examples/ip_pipeline/pipeline_be.h b/examples/ip_pipeline/pipeline_be.h
index f4ff262e..b562472b 100644
--- a/examples/ip_pipeline/pipeline_be.h
+++ b/examples/ip_pipeline/pipeline_be.h
@@ -40,6 +40,9 @@
#include <rte_port_ras.h>
#include <rte_port_sched.h>
#include <rte_port_source_sink.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_port_kni.h>
+#endif
#include <rte_pipeline.h>
enum pipeline_port_in_type {
@@ -49,6 +52,7 @@ enum pipeline_port_in_type {
PIPELINE_PORT_IN_RING_READER_IPV4_FRAG,
PIPELINE_PORT_IN_RING_READER_IPV6_FRAG,
PIPELINE_PORT_IN_SCHED_READER,
+ PIPELINE_PORT_IN_KNI_READER,
PIPELINE_PORT_IN_SOURCE,
};
@@ -61,6 +65,9 @@ struct pipeline_port_in_params {
struct rte_port_ring_reader_ipv4_frag_params ring_ipv4_frag;
struct rte_port_ring_reader_ipv6_frag_params ring_ipv6_frag;
struct rte_port_sched_reader_params sched;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_reader_params kni;
+#endif
struct rte_port_source_params source;
} params;
uint32_t burst_size;
@@ -82,6 +89,10 @@ pipeline_port_in_params_convert(struct pipeline_port_in_params *p)
return (void *) &p->params.ring_ipv6_frag;
case PIPELINE_PORT_IN_SCHED_READER:
return (void *) &p->params.sched;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_IN_KNI_READER:
+ return (void *) &p->params.kni;
+#endif
case PIPELINE_PORT_IN_SOURCE:
return (void *) &p->params.source;
default:
@@ -105,6 +116,10 @@ pipeline_port_in_params_get_ops(struct pipeline_port_in_params *p)
return &rte_port_ring_reader_ipv6_frag_ops;
case PIPELINE_PORT_IN_SCHED_READER:
return &rte_port_sched_reader_ops;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_IN_KNI_READER:
+ return &rte_port_kni_reader_ops;
+#endif
case PIPELINE_PORT_IN_SOURCE:
return &rte_port_source_ops;
default:
@@ -122,6 +137,8 @@ enum pipeline_port_out_type {
PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS,
PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS,
PIPELINE_PORT_OUT_SCHED_WRITER,
+ PIPELINE_PORT_OUT_KNI_WRITER,
+ PIPELINE_PORT_OUT_KNI_WRITER_NODROP,
PIPELINE_PORT_OUT_SINK,
};
@@ -137,6 +154,10 @@ struct pipeline_port_out_params {
struct rte_port_ring_writer_ipv4_ras_params ring_ipv4_ras;
struct rte_port_ring_writer_ipv6_ras_params ring_ipv6_ras;
struct rte_port_sched_writer_params sched;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_writer_params kni;
+ struct rte_port_kni_writer_nodrop_params kni_nodrop;
+#endif
struct rte_port_sink_params sink;
} params;
};
@@ -163,6 +184,12 @@ pipeline_port_out_params_convert(struct pipeline_port_out_params *p)
return (void *) &p->params.ring_ipv6_ras;
case PIPELINE_PORT_OUT_SCHED_WRITER:
return (void *) &p->params.sched;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_OUT_KNI_WRITER:
+ return (void *) &p->params.kni;
+ case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
+ return (void *) &p->params.kni_nodrop;
+#endif
case PIPELINE_PORT_OUT_SINK:
return (void *) &p->params.sink;
default:
@@ -192,6 +219,12 @@ pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
return &rte_port_ring_writer_ipv6_ras_ops;
case PIPELINE_PORT_OUT_SCHED_WRITER:
return &rte_port_sched_writer_ops;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_OUT_KNI_WRITER:
+ return &rte_port_kni_writer_ops;
+ case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
+ return &rte_port_kni_writer_nodrop_ops;
+#endif
case PIPELINE_PORT_OUT_SINK:
return &rte_port_sink_ops;
default:
@@ -200,15 +233,19 @@ pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
}
#ifndef PIPELINE_NAME_SIZE
-#define PIPELINE_NAME_SIZE 32
+#define PIPELINE_NAME_SIZE 64
+#endif
+
+#ifndef PIPELINE_TYPE_SIZE
+#define PIPELINE_TYPE_SIZE 64
#endif
#ifndef PIPELINE_MAX_PORT_IN
-#define PIPELINE_MAX_PORT_IN 16
+#define PIPELINE_MAX_PORT_IN 64
#endif
#ifndef PIPELINE_MAX_PORT_OUT
-#define PIPELINE_MAX_PORT_OUT 16
+#define PIPELINE_MAX_PORT_OUT 64
#endif
#ifndef PIPELINE_MAX_TABLES
@@ -224,11 +261,12 @@ pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
#endif
#ifndef PIPELINE_MAX_ARGS
-#define PIPELINE_MAX_ARGS 32
+#define PIPELINE_MAX_ARGS 64
#endif
struct pipeline_params {
char name[PIPELINE_NAME_SIZE];
+ char type[PIPELINE_TYPE_SIZE];
struct pipeline_port_in_params port_in[PIPELINE_MAX_PORT_IN];
struct pipeline_port_out_params port_out[PIPELINE_MAX_PORT_OUT];
@@ -261,16 +299,11 @@ typedef int (*pipeline_be_op_run)(void *pipeline);
typedef int (*pipeline_be_op_timer)(void *pipeline);
-typedef int (*pipeline_be_op_track)(void *pipeline,
- uint32_t port_in,
- uint32_t *port_out);
-
struct pipeline_be_ops {
pipeline_be_op_init f_init;
pipeline_be_op_free f_free;
pipeline_be_op_run f_run;
pipeline_be_op_timer f_timer;
- pipeline_be_op_track f_track;
};
/* Pipeline specific config parse error messages */
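
Every KNI addition above follows the header's single extension pattern: one new enum value, one union member, and matching switch cases, each fenced by #ifdef RTE_LIBRTE_KNI so the header still builds when KNI is configured out. A self-contained miniature of the pattern (all names here are illustrative stand-ins, not the real DPDK types):

#include <stdio.h>

#define HAVE_KNI 1	/* stand-in for RTE_LIBRTE_KNI */

enum port_in_type {
	PORT_IN_ETHDEV,
#if HAVE_KNI
	PORT_IN_KNI,
#endif
};

struct port_in_params {
	enum port_in_type type;
	union {
		struct { int port_id; } ethdev;
#if HAVE_KNI
		struct { const char *name; } kni;
#endif
	} params;
};

/* Analogue of pipeline_port_in_params_get_ops(): map the tag to the
 * matching ops table (strings stand in for rte_port_*_ops symbols). */
static const char *port_in_ops_name(const struct port_in_params *p)
{
	switch (p->type) {
	case PORT_IN_ETHDEV:
		return "ethdev_reader_ops";
#if HAVE_KNI
	case PORT_IN_KNI:
		return "kni_reader_ops";
#endif
	default:
		return NULL;
	}
}

int main(void)
{
	struct port_in_params p = {
		.type = PORT_IN_KNI,
		.params.kni = { .name = "kni0" },
	};

	printf("%s\n", port_in_ops_name(&p));
	return 0;
}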
diff --git a/examples/ip_pipeline/thread_fe.c b/examples/ip_pipeline/thread_fe.c
index 4a435f7c..6c547ca5 100644
--- a/examples/ip_pipeline/thread_fe.c
+++ b/examples/ip_pipeline/thread_fe.c
@@ -5,10 +5,6 @@
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
-#include <cmdline_parse_ipaddr.h>
-#include <cmdline_parse_etheraddr.h>
-#include <cmdline_socket.h>
-#include <cmdline.h>
#include "thread.h"
#include "thread_fe.h"
@@ -85,6 +81,9 @@ app_pipeline_enable(struct app_params *app,
p_params = &app->pipeline_params[pipeline_id];
p_type = app_pipeline_type_find(app, p_params->type);
+ if (p_type == NULL)
+ return -1;
+
if (p->enabled == 1)
return -1;
@@ -259,26 +258,26 @@ cmd_pipeline_enable_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_pipeline_enable_t_string =
+static cmdline_parse_token_string_t cmd_pipeline_enable_t_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_string, "t");
-cmdline_parse_token_string_t cmd_pipeline_enable_t_id_string =
+static cmdline_parse_token_string_t cmd_pipeline_enable_t_id_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_id_string,
NULL);
-cmdline_parse_token_string_t cmd_pipeline_enable_pipeline_string =
+static cmdline_parse_token_string_t cmd_pipeline_enable_pipeline_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_string,
"pipeline");
-cmdline_parse_token_num_t cmd_pipeline_enable_pipeline_id =
+static cmdline_parse_token_num_t cmd_pipeline_enable_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_pipeline_enable_enable_string =
+static cmdline_parse_token_string_t cmd_pipeline_enable_enable_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, enable_string,
"enable");
-cmdline_parse_inst_t cmd_pipeline_enable = {
+static cmdline_parse_inst_t cmd_pipeline_enable = {
.f = cmd_pipeline_enable_parsed,
.data = NULL,
.help_str = "Enable pipeline on specified core",
@@ -333,26 +332,26 @@ cmd_pipeline_disable_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_pipeline_disable_t_string =
+static cmdline_parse_token_string_t cmd_pipeline_disable_t_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_string, "t");
-cmdline_parse_token_string_t cmd_pipeline_disable_t_id_string =
+static cmdline_parse_token_string_t cmd_pipeline_disable_t_id_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_id_string,
NULL);
-cmdline_parse_token_string_t cmd_pipeline_disable_pipeline_string =
+static cmdline_parse_token_string_t cmd_pipeline_disable_pipeline_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result,
pipeline_string, "pipeline");
-cmdline_parse_token_num_t cmd_pipeline_disable_pipeline_id =
+static cmdline_parse_token_num_t cmd_pipeline_disable_pipeline_id =
TOKEN_NUM_INITIALIZER(struct cmd_pipeline_disable_result, pipeline_id,
UINT32);
-cmdline_parse_token_string_t cmd_pipeline_disable_disable_string =
+static cmdline_parse_token_string_t cmd_pipeline_disable_disable_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, disable_string,
"disable");
-cmdline_parse_inst_t cmd_pipeline_disable = {
+static cmdline_parse_inst_t cmd_pipeline_disable = {
.f = cmd_pipeline_disable_parsed,
.data = NULL,
.help_str = "Disable pipeline on specified core",
@@ -405,19 +404,19 @@ cmd_thread_headroom_parsed(
printf("Command failed\n");
}
-cmdline_parse_token_string_t cmd_thread_headroom_t_string =
+static cmdline_parse_token_string_t cmd_thread_headroom_t_string =
TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
t_string, "t");
-cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
+static cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
t_id_string, NULL);
-cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
+static cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
headroom_string, "headroom");
-cmdline_parse_inst_t cmd_thread_headroom = {
+static cmdline_parse_inst_t cmd_thread_headroom = {
.f = cmd_thread_headroom_parsed,
.data = NULL,
.help_str = "Display thread headroom",
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index c27e7353..ef09a2ed 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -963,7 +963,7 @@ init_mem(void)
RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
- lpm6 = rte_lpm6_create("IP_RSMBL_LPM6", socket, &lpm6_config);
+ lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
if (lpm6 == NULL) {
RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
return -1;
@@ -1040,9 +1040,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
- else if (nb_ports == 0)
+ if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No ports found!\n");
nb_lcores = rte_lcore_count();
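
The one-line LPM6 fix above matters on multi-socket machines: tables are registered by name, so passing the constant "IP_RSMBL_LPM6" for every socket makes the second create collide with the first, while reusing the per-socket buf gives each table a unique name. The naming scheme is plain snprintf():

#include <stdio.h>

int main(void)
{
	char buf[32];
	int socket;

	for (socket = 0; socket < 2; socket++) {
		snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
		printf("%s\n", buf);	/* IP_RSMBL_LPM_0, IP_RSMBL_LPM_1 */
	}
	return 0;
}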
diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile
index f9b59c22..06b6db1e 100644
--- a/examples/ipsec-secgw/Makefile
+++ b/examples/ipsec-secgw/Makefile
@@ -46,15 +46,17 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
CFLAGS_sa.o += -diag-disable=vec
endif
-
-VPATH += $(SRCDIR)/librte_ipsec
+ifeq ($(DEBUG),1)
+CFLAGS += -DIPSEC_DEBUG -fstack-protector-all -O0
+endif
#
# all source are stored in SRCS-y
#
SRCS-y += ipsec.c
SRCS-y += esp.c
-SRCS-y += sp.c
+SRCS-y += sp4.c
+SRCS-y += sp6.c
SRCS-y += sa.c
SRCS-y += rt.c
SRCS-y += ipsec-secgw.c
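
The new DEBUG knob follows the usual DPDK example-Makefile convention; assuming RTE_SDK and RTE_TARGET are set as for any example build, an unoptimized, stack-protected binary with the extra IPSEC_DEBUG tracing comes from:

	make DEBUG=1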
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 19273807..05caa77a 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -37,11 +37,11 @@
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>
#include <rte_common.h>
-#include <rte_memcpy.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>
@@ -50,15 +50,13 @@
#include "esp.h"
#include "ipip.h"
-#define IP_ESP_HDR_SZ (sizeof(struct ip) + sizeof(struct esp_hdr))
-
static inline void
random_iv_u64(uint64_t *buf, uint16_t n)
{
- unsigned left = n & 0x7;
- unsigned i;
+ uint32_t left = n & 0x7;
+ uint32_t i;
- IPSEC_ASSERT((n & 0x3) == 0);
+ RTE_ASSERT((n & 0x3) == 0);
for (i = 0; i < (n >> 3); i++)
buf[i] = rte_rand();
@@ -67,23 +65,35 @@ random_iv_u64(uint64_t *buf, uint16_t n)
*((uint32_t *)&buf[i]) = (uint32_t)lrand48();
}
-/* IPv4 Tunnel */
int
-esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
- int32_t payload_len;
+ struct ip *ip4;
struct rte_crypto_sym_op *sym_cop;
+ int32_t payload_len, ip_hdr_len;
+
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
+
+ ip4 = rte_pktmbuf_mtod(m, struct ip *);
+ if (likely(ip4->ip_v == IPVERSION))
+ ip_hdr_len = ip4->ip_hl * 4;
+ else if (ip4->ip_v == IP6_VERSION)
+ /* XXX No option headers supported */
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ else {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
+ ip4->ip_v);
+ return -EINVAL;
+ }
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
-
- payload_len = rte_pktmbuf_pkt_len(m) - IP_ESP_HDR_SZ - sa->iv_len -
- sa->digest_len;
+ payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
+ sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
+ RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
payload_len, sa->block_size);
return -EINVAL;
}
@@ -91,21 +101,19 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = IP_ESP_HDR_SZ + sa->iv_len;
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
sym_cop->cipher.data.length = payload_len;
sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, void*,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
- sym_cop->auth.data.offset = sizeof(struct ip);
- if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
- sym_cop->auth.data.length = sizeof(struct esp_hdr);
- else
- sym_cop->auth.data.length = sizeof(struct esp_hdr) +
- sa->iv_len + payload_len;
+ sym_cop->auth.data.offset = ip_hdr_len;
+ sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+ sa->iv_len + payload_len;
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
@@ -117,19 +125,21 @@ esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
}
int
-esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
+ struct ip *ip4, *ip;
+ struct ip6_hdr *ip6;
uint8_t *nexthdr, *pad_len;
uint8_t *padding;
uint16_t i;
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
return -1;
}
@@ -139,111 +149,187 @@ esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
padding = pad_len - *pad_len;
for (i = 0; i < *pad_len; i++) {
- if (padding[i] != i) {
- IPSEC_LOG(ERR, IPSEC_ESP, "invalid pad_len field\n");
+ if (padding[i] != i + 1) {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
return -EINVAL;
}
}
if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
- IPSEC_LOG(ERR, IPSEC_ESP,
+ RTE_LOG(ERR, IPSEC_ESP,
"failed to remove pad_len + digest\n");
return -EINVAL;
}
- return ip4ip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+ if (unlikely(sa->flags == TRANSPORT)) {
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ ip4 = (struct ip *)rte_pktmbuf_adj(m,
+ sizeof(struct esp_hdr) + sa->iv_len);
+ if (likely(ip->ip_v == IPVERSION)) {
+ memmove(ip4, ip, ip->ip_hl * 4);
+ ip4->ip_p = *nexthdr;
+ ip4->ip_len = htons(rte_pktmbuf_data_len(m));
+ } else {
+ ip6 = (struct ip6_hdr *)ip4;
+ /* XXX No option headers supported */
+ memmove(ip6, ip, sizeof(struct ip6_hdr));
+ ip6->ip6_nxt = *nexthdr;
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ }
+ } else
+ ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+
+ return 0;
}
int
-esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
- uint16_t pad_payload_len, pad_len;
- struct ip *ip;
- struct esp_hdr *esp;
- int i;
- char *padding;
+ struct ip *ip4;
+ struct ip6_hdr *ip6;
+ struct esp_hdr *esp = NULL;
+ uint8_t *padding, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
+ int32_t i;
+ uint16_t pad_payload_len, pad_len, ip_hdr_len;
+
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
+
+ ip_hdr_len = 0;
+
+ ip4 = rte_pktmbuf_mtod(m, struct ip *);
+ if (likely(ip4->ip_v == IPVERSION)) {
+ if (unlikely(sa->flags == TRANSPORT)) {
+ ip_hdr_len = ip4->ip_hl * 4;
+ nlp = ip4->ip_p;
+ } else
+ nlp = IPPROTO_IPIP;
+ } else if (ip4->ip_v == IP6_VERSION) {
+ if (unlikely(sa->flags == TRANSPORT)) {
+ /* XXX No option headers supported */
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ ip6 = (struct ip6_hdr *)ip4;
+ nlp = ip6->ip6_nxt;
+ } else
+ nlp = IPPROTO_IPV6;
+ } else {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
+ ip4->ip_v);
+ return -EINVAL;
+ }
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
-
- /* Payload length */
- pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) + 2,
- sa->block_size);
- pad_len = pad_payload_len - rte_pktmbuf_pkt_len(m);
-
- rte_prefetch0(rte_pktmbuf_mtod_offset(m, void *,
- rte_pktmbuf_pkt_len(m)));
+ /* Padded payload length */
+ pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
+ ip_hdr_len + 2, sa->block_size);
+ pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
+
+ RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
+ sa->flags == TRANSPORT);
+
+ if (likely(sa->flags == IP4_TUNNEL))
+ ip_hdr_len = sizeof(struct ip);
+ else if (sa->flags == IP6_TUNNEL)
+ ip_hdr_len = sizeof(struct ip6_hdr);
+ else if (sa->flags != TRANSPORT) {
+ RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
+ sa->flags);
+ return -EINVAL;
+ }
/* Check maximum packet size */
- if (unlikely(IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len +
- sa->digest_len > IP_MAXPACKET)) {
- IPSEC_LOG(DEBUG, IPSEC_ESP, "ipsec packet is too big\n");
+ if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
+ pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
+ RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
return -EINVAL;
}
- padding = rte_pktmbuf_append(m, pad_len + sa->digest_len);
-
- IPSEC_ASSERT(padding != NULL);
-
- ip = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
- sa->src, sa->dst);
+ padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
+ if (unlikely(padding == NULL)) {
+ RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
+ return -ENOSPC;
+ }
+ rte_prefetch0(padding);
+
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ &sa->src, &sa->dst);
+ esp = (struct esp_hdr *)(ip4 + 1);
+ break;
+ case IP6_TUNNEL:
+ ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+ &sa->src, &sa->dst);
+ esp = (struct esp_hdr *)(ip6 + 1);
+ break;
+ case TRANSPORT:
+ new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
+ sizeof(struct esp_hdr) + sa->iv_len);
+ memmove(new_ip, ip4, ip_hdr_len);
+ esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+ if (likely(ip4->ip_v == IPVERSION)) {
+ ip4 = (struct ip *)new_ip;
+ ip4->ip_p = IPPROTO_ESP;
+ ip4->ip_len = htons(rte_pktmbuf_data_len(m));
+ } else {
+ ip6 = (struct ip6_hdr *)new_ip;
+ ip6->ip6_nxt = IPPROTO_ESP;
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ }
+ }
- esp = (struct esp_hdr *)(ip + 1);
- esp->spi = sa->spi;
- esp->seq = htonl(sa->seq++);
+ sa->seq++;
+ esp->spi = rte_cpu_to_be_32(sa->spi);
+ esp->seq = rte_cpu_to_be_32(sa->seq);
- IPSEC_LOG(DEBUG, IPSEC_ESP, "pktlen %u\n", rte_pktmbuf_pkt_len(m));
+ if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
+ random_iv_u64((uint64_t *)(esp + 1), sa->iv_len);
/* Fill pad_len using default sequential scheme */
for (i = 0; i < pad_len - 2; i++)
padding[i] = i + 1;
-
padding[pad_len - 2] = pad_len - 2;
- padding[pad_len - 1] = IPPROTO_IPIP;
+ padding[pad_len - 1] = nlp;
sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
sym_cop->m_src = m;
- sym_cop->cipher.data.offset = IP_ESP_HDR_SZ + sa->iv_len;
+ sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+ sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ);
+ ip_hdr_len + sizeof(struct esp_hdr));
sym_cop->cipher.iv.length = sa->iv_len;
- sym_cop->auth.data.offset = sizeof(struct ip);
+ sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) + sa->iv_len +
pad_payload_len;
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len);
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- IP_ESP_HDR_SZ + sa->iv_len + pad_payload_len);
+ rte_pktmbuf_pkt_len(m) - sa->digest_len);
sym_cop->auth.digest.length = sa->digest_len;
- if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
- random_iv_u64((uint64_t *)sym_cop->cipher.iv.data,
- sym_cop->cipher.iv.length);
-
return 0;
}
int
-esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m __rte_unused,
+esp_outbound_post(struct rte_mbuf *m __rte_unused,
struct ipsec_sa *sa __rte_unused,
struct rte_crypto_op *cop)
{
- IPSEC_ASSERT(m != NULL);
- IPSEC_ASSERT(sa != NULL);
- IPSEC_ASSERT(cop != NULL);
+ RTE_ASSERT(m != NULL);
+ RTE_ASSERT(sa != NULL);
+ RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- IPSEC_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
return -1;
}
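
Both directions above now agree on the ESP trailer layout: the outbound path fills the default monotone pad (1, 2, ..., N) followed by the pad-length and next-header bytes, and the inbound check was corrected from padding[i] != i to padding[i] != i + 1 to match that 1-based sequence. A standalone sketch of the build and verify steps, using a plain buffer instead of an mbuf:

#include <stdint.h>
#include <stdio.h>

/* Fill pad bytes 1..N, then the Pad Length and Next Header bytes, as
 * esp_outbound() does; pad_len counts the whole trailer here. */
static void fill_esp_trailer(uint8_t *padding, uint16_t pad_len, uint8_t nlp)
{
	uint16_t i;

	for (i = 0; i < pad_len - 2; i++)
		padding[i] = i + 1;
	padding[pad_len - 2] = pad_len - 2;	/* Pad Length */
	padding[pad_len - 1] = nlp;		/* Next Header */
}

/* Mirror of the inbound check: every pad byte must equal its 1-based
 * position; Pad Length and Next Header are not part of the sequence. */
static int check_esp_padding(const uint8_t *padding, uint8_t pad_len)
{
	uint8_t i;

	for (i = 0; i < pad_len; i++)
		if (padding[i] != i + 1)
			return -1;
	return 0;
}

int main(void)
{
	uint8_t trailer[18];

	fill_esp_trailer(trailer, sizeof(trailer), 4 /* IPPROTO_IPIP */);
	printf("padding %s\n",
		check_esp_padding(trailer, trailer[sizeof(trailer) - 2])
		== 0 ? "ok" : "bad");
	return 0;
}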
diff --git a/examples/ipsec-secgw/esp.h b/examples/ipsec-secgw/esp.h
index 31018823..fa5cc8af 100644
--- a/examples/ipsec-secgw/esp.h
+++ b/examples/ipsec-secgw/esp.h
@@ -46,21 +46,20 @@ struct esp_hdr {
/* Integrity Check Value - ICV */
};
-/* IPv4 Tunnel */
int
-esp4_tunnel_inbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_inbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_outbound_pre_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
int
-esp4_tunnel_outbound_post_crypto(struct rte_mbuf *m, struct ipsec_sa *sa,
+esp_outbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
#endif /* __RTE_IPSEC_XFORM_ESP_H__ */
diff --git a/examples/ipsec-secgw/ipip.h b/examples/ipsec-secgw/ipip.h
index 322076ce..ce25a2e2 100644
--- a/examples/ipsec-secgw/ipip.h
+++ b/examples/ipsec-secgw/ipip.h
@@ -37,67 +37,144 @@
#include <stdint.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <rte_mbuf.h>
-#define IPV6_VERSION (6)
-
-static inline struct ip *
-ip4ip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t src, uint32_t dst)
+static inline void *
+ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6,
+ struct ip_addr *src, struct ip_addr *dst)
{
- struct ip *inip, *outip;
+ struct ip *inip4, *outip4;
+ struct ip6_hdr *inip6, *outip6;
+ uint8_t ds_ecn;
- inip = rte_pktmbuf_mtod(m, struct ip*);
+ inip4 = rte_pktmbuf_mtod(m, struct ip *);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION);
- offset += sizeof(struct ip);
+ if (inip4->ip_v == IPVERSION) {
+ /* XXX This should be done by the forwarding engine instead */
+ inip4->ip_ttl -= 1;
+ ds_ecn = inip4->ip_tos;
+ } else {
+ inip6 = (struct ip6_hdr *)inip4;
+ /* XXX This should be done by the forwarding engine instead */
+ inip6->ip6_hops -= 1;
+ ds_ecn = ntohl(inip6->ip6_flow) >> 20;
+ }
+
+ if (is_ipv6) {
+ offset += sizeof(struct ip6_hdr);
+ outip6 = (struct ip6_hdr *)rte_pktmbuf_prepend(m, offset);
+
+ RTE_ASSERT(outip6 != NULL);
+
+ /* Per RFC4301 5.1.2.1 */
+ outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20);
+ outip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+
+ outip6->ip6_nxt = IPPROTO_ESP;
+ outip6->ip6_hops = IPDEFTTL;
- outip = (struct ip *)rte_pktmbuf_prepend(m, offset);
+ memcpy(&outip6->ip6_src.s6_addr, src, 16);
+ memcpy(&outip6->ip6_dst.s6_addr, dst, 16);
- IPSEC_ASSERT(outip != NULL);
+ return outip6;
+ }
+
+ offset += sizeof(struct ip);
+ outip4 = (struct ip *)rte_pktmbuf_prepend(m, offset);
+
+ RTE_ASSERT(outip4 != NULL);
/* Per RFC4301 5.1.2.1 */
- outip->ip_v = IPVERSION;
- outip->ip_hl = 5;
- outip->ip_tos = inip->ip_tos;
- outip->ip_len = htons(rte_pktmbuf_data_len(m));
+ outip4->ip_v = IPVERSION;
+ outip4->ip_hl = 5;
+ outip4->ip_tos = ds_ecn;
+ outip4->ip_len = htons(rte_pktmbuf_data_len(m));
+
+ outip4->ip_id = 0;
+ outip4->ip_off = 0;
+
+ outip4->ip_ttl = IPDEFTTL;
+ outip4->ip_p = IPPROTO_ESP;
+
+ outip4->ip_src.s_addr = src->ip4;
+ outip4->ip_dst.s_addr = dst->ip4;
- outip->ip_id = 0;
- outip->ip_off = 0;
+ return outip4;
+}
+
+static inline struct ip *
+ip4ip_outbound(struct rte_mbuf *m, uint32_t offset,
+ struct ip_addr *src, struct ip_addr *dst)
+{
+ return ipip_outbound(m, offset, 0, src, dst);
+}
- outip->ip_ttl = IPDEFTTL;
- outip->ip_p = IPPROTO_ESP;
+static inline struct ip6_hdr *
+ip6ip_outbound(struct rte_mbuf *m, uint32_t offset,
+ struct ip_addr *src, struct ip_addr *dst)
+{
+ return ipip_outbound(m, offset, 1, src, dst);
+}
- outip->ip_src.s_addr = src;
- outip->ip_dst.s_addr = dst;
+static inline void
+ip4_ecn_setup(struct ip *ip4)
+{
+ if (ip4->ip_tos & IPTOS_ECN_MASK)
+ ip4->ip_tos |= IPTOS_ECN_CE;
+}
- return outip;
+static inline void
+ip6_ecn_setup(struct ip6_hdr *ip6)
+{
+ if ((ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK)
+ ip6->ip6_flow = htonl(ntohl(ip6->ip6_flow) |
+ (IPTOS_ECN_CE << 20));
}
-static inline int
-ip4ip_inbound(struct rte_mbuf *m, uint32_t offset)
+static inline void
+ipip_inbound(struct rte_mbuf *m, uint32_t offset)
{
- struct ip *inip;
- struct ip *outip;
+ struct ip *inip4, *outip4;
+ struct ip6_hdr *inip6, *outip6;
+ uint32_t ip_len, set_ecn;
- outip = rte_pktmbuf_mtod(m, struct ip*);
+ outip4 = rte_pktmbuf_mtod(m, struct ip*);
- IPSEC_ASSERT(outip->ip_v == IPVERSION);
+ RTE_ASSERT(outip4->ip_v == IPVERSION || outip4->ip_v == IP6_VERSION);
- offset += sizeof(struct ip);
- inip = (struct ip *)rte_pktmbuf_adj(m, offset);
- IPSEC_ASSERT(inip->ip_v == IPVERSION || inip->ip_v == IPV6_VERSION);
+ if (outip4->ip_v == IPVERSION) {
+ ip_len = sizeof(struct ip);
+ set_ecn = ((outip4->ip_tos & IPTOS_ECN_CE) == IPTOS_ECN_CE);
+ } else {
+ outip6 = (struct ip6_hdr *)outip4;
+ ip_len = sizeof(struct ip6_hdr);
+ set_ecn = ntohl(outip6->ip6_flow) >> 20;
+ set_ecn = ((set_ecn & IPTOS_ECN_CE) == IPTOS_ECN_CE);
+ }
+
+ inip4 = (struct ip *)rte_pktmbuf_adj(m, offset + ip_len);
+ RTE_ASSERT(inip4->ip_v == IPVERSION || inip4->ip_v == IP6_VERSION);
/* Check packet is still bigger than IP header (inner) */
- IPSEC_ASSERT(rte_pktmbuf_pkt_len(m) > sizeof(struct ip));
+ RTE_ASSERT(rte_pktmbuf_pkt_len(m) > ip_len);
/* RFC4301 5.1.2.1 Note 6 */
- if ((inip->ip_tos & htons(IPTOS_ECN_ECT0 | IPTOS_ECN_ECT1)) &&
- ((outip->ip_tos & htons(IPTOS_ECN_CE)) == IPTOS_ECN_CE))
- inip->ip_tos |= htons(IPTOS_ECN_CE);
-
- return 0;
+ if (inip4->ip_v == IPVERSION) {
+ if (set_ecn)
+ ip4_ecn_setup(inip4);
+ /* XXX This should be done by the forwarding engine instead */
+ inip4->ip_ttl -= 1;
+ } else {
+ inip6 = (struct ip6_hdr *)inip4;
+ if (set_ecn)
+ ip6_ecn_setup(inip6);
+ /* XXX This should be done by the forwarding engine instead */
+ inip6->ip6_hops -= 1;
+ }
}
#endif /* __IPIP_H__ */
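
The ip4_ecn_setup()/ip6_ecn_setup() helpers above implement the RFC 4301 5.1.2.1 decapsulation rule that ipip_inbound() applies: propagate Congestion Experienced from the outer header to the inner one, but only when the inner packet is ECN-capable. A standalone sketch of the rule on a bare TOS byte:

#include <stdint.h>
#include <stdio.h>

#define ECN_MASK 0x03	/* IPTOS_ECN_MASK */
#define ECN_CE   0x03	/* IPTOS_ECN_CE */

/* Return the inner TOS after decap: CE is copied down only if the
 * inner header carries a non-zero (ECN-capable) codepoint. */
static uint8_t decap_tos(uint8_t outer_tos, uint8_t inner_tos)
{
	int outer_ce = (outer_tos & ECN_CE) == ECN_CE;

	if (outer_ce && (inner_tos & ECN_MASK))
		inner_tos |= ECN_CE;
	return inner_tos;
}

int main(void)
{
	/* outer CE + inner ECT(0): the congestion mark survives decap */
	printf("0x%02x\n", decap_tos(0x03, 0x02));	/* -> 0x03 */
	/* outer CE + inner not-ECT: the inner header is left alone */
	printf("0x%02x\n", decap_tos(0x03, 0x00));	/* -> 0x00 */
	return 0;
}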
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 00ab2d84..f78743d0 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -38,6 +38,7 @@
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
@@ -65,6 +66,7 @@
#include <rte_mbuf.h>
#include <rte_acl.h>
#include <rte_lpm.h>
+#include <rte_lpm6.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
@@ -192,7 +194,8 @@ struct lcore_conf {
struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
struct ipsec_ctx inbound;
struct ipsec_ctx outbound;
- struct rt_ctx *rt_ctx;
+ struct rt_ctx *rt4_ctx;
+ struct rt_ctx *rt6_ctx;
} __rte_cache_aligned;
static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
@@ -230,27 +233,39 @@ struct traffic_type {
};
struct ipsec_traffic {
- struct traffic_type ipsec4;
- struct traffic_type ipv4;
+ struct traffic_type ipsec;
+ struct traffic_type ip4;
+ struct traffic_type ip6;
};
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
uint8_t *nlp;
+ struct ether_hdr *eth;
- if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
- rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
- nlp = rte_pktmbuf_mtod_offset(pkt, uint8_t *,
- offsetof(struct ip, ip_p));
+ eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+ nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
if (*nlp == IPPROTO_ESP)
- t->ipsec4.pkts[(t->ipsec4.num)++] = pkt;
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
else {
- t->ipv4.data[t->ipv4.num] = nlp;
- t->ipv4.pkts[(t->ipv4.num)++] = pkt;
+ t->ip4.data[t->ip4.num] = nlp;
+ t->ip4.pkts[(t->ip4.num)++] = pkt;
+ }
+ } else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+ nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
+ nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
+ if (*nlp == IPPROTO_ESP)
+ t->ipsec.pkts[(t->ipsec.num)++] = pkt;
+ else {
+ t->ip6.data[t->ip6.num] = nlp;
+ t->ip6.pkts[(t->ip6.num)++] = pkt;
}
} else {
/* Unknown/Unsupported type, drop the packet */
+ RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
rte_pktmbuf_free(pkt);
}
}
@@ -261,8 +276,9 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
{
int32_t i;
- t->ipsec4.num = 0;
- t->ipv4.num = 0;
+ t->ipsec.num = 0;
+ t->ip4.num = 0;
+ t->ip6.num = 0;
for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
@@ -277,14 +293,27 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
static inline void
prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
{
- pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
- pkt->l3_len = sizeof(struct ip);
- pkt->l2_len = ETHER_HDR_LEN;
+ struct ip *ip;
+ struct ether_hdr *ethhdr;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+ ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
- struct ether_hdr *ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt,
- ETHER_HDR_LEN);
+ if (ip->ip_v == IPVERSION) {
+ pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
+ pkt->l3_len = sizeof(struct ip);
+ pkt->l2_len = ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ } else {
+ pkt->ol_flags |= PKT_TX_IPV6;
+ pkt->l3_len = sizeof(struct ip6_hdr);
+ pkt->l2_len = ETHER_HDR_LEN;
+
+ ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ }
- ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
sizeof(struct ether_addr));
memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
@@ -298,7 +327,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
const int32_t prefetch_offset = 2;
for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
- rte_prefetch0(pkts[i + prefetch_offset]->cacheline1);
+ rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
prepare_tx_pkt(pkts[i], port);
}
/* Process left packets */
@@ -355,94 +384,133 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
}
static inline void
-process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
- struct ipsec_traffic *traffic)
+inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip)
{
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_in, i, j;
- uint32_t sa_idx, res;
-
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.num, MAX_PKT_BURST);
+ uint32_t i, j, res, sa_idx;
- /* SP/ACL Inbound check ipsec and ipv4 */
- for (i = 0; i < nb_pkts_in; i++) {
- idx = traffic->ipv4.num++;
- m = traffic->ipsec4.pkts[i];
- traffic->ipv4.pkts[idx] = m;
- traffic->ipv4.data[idx] = rte_pktmbuf_mtod_offset(m,
- uint8_t *, offsetof(struct ip, ip_p));
- }
+ if (ip->num == 0)
+ return;
- rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
- traffic->ipv4.data, traffic->ipv4.res,
- traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
j = 0;
- for (i = 0; i < traffic->ipv4.num - nb_pkts_in; i++) {
- m = traffic->ipv4.pkts[i];
- res = traffic->ipv4.res[i];
- if (res & ~BYPASS) {
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ res = ip->res[i];
+ if (res & DISCARD) {
rte_pktmbuf_free(m);
continue;
}
- traffic->ipv4.pkts[j++] = m;
- }
- /* Check return SA SPI matches pkt SPI */
- for ( ; i < traffic->ipv4.num; i++) {
- m = traffic->ipv4.pkts[i];
- sa_idx = traffic->ipv4.res[i] & PROTECT_MASK;
- if (sa_idx == 0 || !inbound_sa_check(ipsec_ctx->sa_ctx,
- m, sa_idx)) {
+ if (res & BYPASS) {
+ ip->pkts[j++] = m;
+ continue;
+ }
+ /* Check return SA SPI matches pkt SPI */
+ sa_idx = ip->res[i] & PROTECT_MASK;
+ if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
rte_pktmbuf_free(m);
continue;
}
- traffic->ipv4.pkts[j++] = m;
+ ip->pkts[j++] = m;
}
- traffic->ipv4.num = j;
+ ip->num = j;
}
static inline void
-process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
+process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_out, i, j;
- uint32_t sa_idx, res;
+ uint16_t idx, nb_pkts_in, i;
- rte_acl_classify((struct rte_acl_ctx *)ipsec_ctx->sp_ctx,
- traffic->ipv4.data, traffic->ipv4.res,
- traffic->ipv4.num, DEFAULT_MAX_CATEGORIES);
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
- /* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec4.num; i++)
- rte_pktmbuf_free(traffic->ipsec4.pkts[i]);
+ /* SP/ACL Inbound check ipsec and ip4 */
+ for (i = 0; i < nb_pkts_in; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
+ uint8_t *, offsetof(struct ip, ip_p));
+ } else if (ip->ip_v == IP6_VERSION) {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
+ uint8_t *,
+ offsetof(struct ip6_hdr, ip6_nxt));
+ } else
+ rte_pktmbuf_free(m);
+ }
+
+ inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4);
+
+ inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6);
+}
+
+static inline void
+outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
+ struct traffic_type *ipsec)
+{
+ struct rte_mbuf *m;
+ uint32_t i, j, sa_idx;
+
+ if (ip->num == 0)
+ return;
- traffic->ipsec4.num = 0;
+ rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
+ ip->num, DEFAULT_MAX_CATEGORIES);
j = 0;
- for (i = 0; i < traffic->ipv4.num; i++) {
- m = traffic->ipv4.pkts[i];
- res = traffic->ipv4.res[i];
- sa_idx = res & PROTECT_MASK;
- if ((res == 0) || (res & DISCARD))
+ for (i = 0; i < ip->num; i++) {
+ m = ip->pkts[i];
+ sa_idx = ip->res[i] & PROTECT_MASK;
+ if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
rte_pktmbuf_free(m);
else if (sa_idx != 0) {
- traffic->ipsec4.res[traffic->ipsec4.num] = sa_idx;
- traffic->ipsec4.pkts[traffic->ipsec4.num++] = m;
+ ipsec->res[ipsec->num] = sa_idx;
+ ipsec->pkts[ipsec->num++] = m;
} else /* BYPASS */
- traffic->ipv4.pkts[j++] = m;
+ ip->pkts[j++] = m;
}
- traffic->ipv4.num = j;
+ ip->num = j;
+}
+
+static inline void
+process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
+ struct ipsec_traffic *traffic)
+{
+ struct rte_mbuf *m;
+ uint16_t idx, nb_pkts_out, i;
+
+ /* Drop any IPsec traffic from protected ports */
+ for (i = 0; i < traffic->ipsec.num; i++)
+ rte_pktmbuf_free(traffic->ipsec.pkts[i]);
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.res, traffic->ipsec4.num,
+ traffic->ipsec.num = 0;
+
+ outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
+
+ outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.res, traffic->ipsec.num,
MAX_PKT_BURST);
for (i = 0; i < nb_pkts_out; i++) {
- idx = traffic->ipv4.num++;
- m = traffic->ipsec4.pkts[i];
- traffic->ipv4.pkts[idx] = m;
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
}
}
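
outbound_sp() above leans on the SP result encoding used throughout this example: a result of 0 means no rule matched, the DISCARD bit drops the packet, a non-zero index under PROTECT_MASK selects the SA, and anything else is BYPASS. A self-contained decoding sketch (the two macro values below are placeholder assumptions, not the ones from ipsec.h):

	#include <stdint.h>

	#define PROTECT_MASK	0x0000ffff	/* assumption: low bits = SA index */
	#define DISCARD		0x80000000	/* assumption: placeholder bit */

	enum sp_verdict { SP_DROP, SP_PROTECT, SP_BYPASS };

	/* Sketch: decode one rte_acl_classify() result the way outbound_sp()
	 * does; *sa_idx is only meaningful for SP_PROTECT. */
	static enum sp_verdict
	sp_decode(uint32_t res, uint32_t *sa_idx)
	{
		*sa_idx = res & PROTECT_MASK;
		if (res == 0 || (res & DISCARD))
			return SP_DROP;
		return (*sa_idx != 0) ? SP_PROTECT : SP_BYPASS;
	}
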
@@ -450,47 +518,72 @@ static inline void
process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
- uint16_t nb_pkts_in, i;
+ struct rte_mbuf *m;
+ uint32_t nb_pkts_in, i, idx;
/* Drop any IPv4 traffic from unprotected ports */
- for (i = 0; i < traffic->ipv4.num; i++)
- rte_pktmbuf_free(traffic->ipv4.pkts[i]);
+ for (i = 0; i < traffic->ip4.num; i++)
+ rte_pktmbuf_free(traffic->ip4.pkts[i]);
- traffic->ipv4.num = 0;
+ traffic->ip4.num = 0;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec4.pkts,
- traffic->ipsec4.num, MAX_PKT_BURST);
+ /* Drop any IPv6 traffic from unprotected ports */
+ for (i = 0; i < traffic->ip6.num; i++)
+ rte_pktmbuf_free(traffic->ip6.pkts[i]);
- for (i = 0; i < nb_pkts_in; i++)
- traffic->ipv4.pkts[i] = traffic->ipsec4.pkts[i];
+ traffic->ip6.num = 0;
- traffic->ipv4.num = nb_pkts_in;
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+
+ for (i = 0; i < nb_pkts_in; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
+ }
}
static inline void
process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
- uint16_t nb_pkts_out, i;
+ struct rte_mbuf *m;
+ uint32_t nb_pkts_out, i;
+ struct ip *ip;
/* Drop any IPsec traffic from protected ports */
- for (i = 0; i < traffic->ipsec4.num; i++)
- rte_pktmbuf_free(traffic->ipsec4.pkts[i]);
+ for (i = 0; i < traffic->ipsec.num; i++)
+ rte_pktmbuf_free(traffic->ipsec.pkts[i]);
- traffic->ipsec4.num = 0;
+ traffic->ipsec.num = 0;
- for (i = 0; i < traffic->ipv4.num; i++)
- traffic->ipv4.res[i] = single_sa_idx;
+ for (i = 0; i < traffic->ip4.num; i++)
+ traffic->ip4.res[i] = single_sa_idx;
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipv4.pkts,
- traffic->ipv4.res, traffic->ipv4.num,
+ for (i = 0; i < traffic->ip6.num; i++)
+ traffic->ip6.res[i] = single_sa_idx;
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
+ traffic->ip4.res, traffic->ip4.num,
MAX_PKT_BURST);
- traffic->ipv4.num = nb_pkts_out;
+ /* They all use the same SA (ip4 or ip6 tunnel) */
+ m = traffic->ip4.pkts[0];
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION)
+ traffic->ip4.num = nb_pkts_out;
+ else
+ traffic->ip6.num = nb_pkts_out;
}
static inline void
-route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
uint32_t hop[MAX_PKT_BURST * 2];
uint32_t dst_ip[MAX_PKT_BURST * 2];
@@ -518,6 +611,35 @@ route_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
}
static inline void
+route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
+{
+ int16_t hop[MAX_PKT_BURST * 2];
+ uint8_t dst_ip[MAX_PKT_BURST * 2][16];
+ uint8_t *ip6_dst;
+ uint16_t i, offset;
+
+ if (nb_pkts == 0)
+ return;
+
+ for (i = 0; i < nb_pkts; i++) {
+ offset = offsetof(struct ip6_hdr, ip6_dst);
+ ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
+ memcpy(&dst_ip[i][0], ip6_dst, 16);
+ }
+
+ rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
+ hop, nb_pkts);
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (hop[i] == -1) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+ send_single_packet(pkts[i], hop[i] & 0xff);
+ }
+}
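
route6_pkts() treats a bulk-lookup result of -1 as no-route and keeps the egress port in the low byte of the stored hop. The same rte_lpm6 calls in isolation, as a sketch assuming EAL is already initialized (table name and route values are illustrative):

	#include <rte_lpm6.h>

	static void
	lpm6_demo(void)
	{
		struct rte_lpm6_config cfg = {
			.max_rules = 1024,
			.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES,
		};
		struct rte_lpm6 *lpm6 = rte_lpm6_create("demo_rt_ip6", 0, &cfg);
		uint8_t dst[16] = { 0x22, 0x22 };	/* remaining bytes zero */
		int16_t hop[1];

		if (lpm6 == NULL)
			return;
		rte_lpm6_add(lpm6, dst, 116, 1);	/* /116 prefix -> port 1 */
		rte_lpm6_lookup_bulk_func(lpm6, &dst, hop, 1);
		/* hop[0] is -1 on miss, otherwise the next hop stored above */
		rte_lpm6_free(lpm6);
	}
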
+
+static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
uint8_t nb_pkts, uint8_t portid)
{
@@ -525,7 +647,7 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
prepare_traffic(pkts, &traffic, nb_pkts);
- if (single_sa) {
+ if (unlikely(single_sa)) {
if (UNPROTECTED_PORT(portid))
process_pkts_inbound_nosp(&qconf->inbound, &traffic);
else
@@ -537,7 +659,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
process_pkts_outbound(&qconf->outbound, &traffic);
}
- route_pkts(qconf->rt_ctx, traffic.ipv4.pkts, traffic.ipv4.num);
+ route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
+ route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
}
static inline void
@@ -576,12 +699,15 @@ main_loop(__attribute__((unused)) void *dummy)
rxql = qconf->rx_queue_list;
socket_id = rte_lcore_to_socket_id(lcore_id);
- qconf->rt_ctx = socket_ctx[socket_id].rt_ipv4;
- qconf->inbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_in;
- qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_in;
+ qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
+ qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
+ qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
+ qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
+ qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
qconf->inbound.cdev_map = cdev_map_in;
- qconf->outbound.sp_ctx = socket_ctx[socket_id].sp_ipv4_out;
- qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_ipv4_out;
+ qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
+ qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
+ qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
qconf->outbound.cdev_map = cdev_map_out;
if (qconf->nb_rx_queue == 0) {
@@ -636,8 +762,6 @@ check_params(void)
}
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -762,7 +886,7 @@ parse_config(const char *q_arg)
FLD_LCORE,
_NUM_FLD
};
- int long int_fld[_NUM_FLD];
+ unsigned long int_fld[_NUM_FLD];
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
@@ -1286,8 +1410,6 @@ main(int32_t argc, char **argv)
unprotected_port_mask);
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_params() < 0)
rte_exit(EXIT_FAILURE, "check_params failed\n");
@@ -1313,7 +1435,9 @@ main(int32_t argc, char **argv)
sa_init(&socket_ctx[socket_id], socket_id, ep);
- sp_init(&socket_ctx[socket_id], socket_id, ep);
+ sp4_init(&socket_ctx[socket_id], socket_id, ep);
+
+ sp6_init(&socket_ctx[socket_id], socket_id, ep);
rt_init(&socket_ctx[socket_id], socket_id, ep);
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index baf30d4b..1e87d0df 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -42,11 +42,12 @@
#include <rte_hash.h>
#include "ipsec.h"
+#include "esp.h"
static inline int
create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
{
- uint32_t cdev_id_qp = 0;
+ unsigned long cdev_id_qp = 0;
int32_t ret;
struct cdev_key key = { 0 };
@@ -58,14 +59,15 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
(void **)&cdev_id_qp);
if (ret < 0) {
- IPSEC_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
+ RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
"auth_algo %u\n", key.lcore_id, key.cipher_algo,
key.auth_algo);
return -1;
}
- IPSEC_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
- "%u qp %u\n", sa->spi, ipsec_ctx->tbl[cdev_id_qp].id,
+ RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
+ "%u qp %u\n", sa->spi,
+ ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
sa->crypto_session = rte_cryptodev_sym_session_create(
@@ -79,7 +81,7 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
- int ret, i;
+ int32_t ret, i;
cqp->buf[cqp->len++] = cop;
@@ -87,7 +89,7 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
cqp->buf, cqp->len);
if (ret < cqp->len) {
- IPSEC_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+ RTE_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
" enqueued %u crypto ops out of %u\n",
cqp->id, cqp->qp,
ret, cqp->len);
@@ -99,17 +101,21 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
}
}
-static inline uint16_t
-ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
- struct ipsec_sa *sas[], uint16_t nb_pkts, uint16_t max_pkts)
+static inline void
+ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ struct rte_mbuf *pkts[], struct ipsec_sa *sas[],
+ uint16_t nb_pkts)
{
- int ret = 0, i, j, nb_cops;
+ int32_t ret = 0, i;
struct ipsec_mbuf_metadata *priv;
- struct rte_crypto_op *cops[max_pkts];
struct ipsec_sa *sa;
- struct rte_mbuf *pkt;
for (i = 0; i < nb_pkts; i++) {
+ if (unlikely(sas[i] == NULL)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
rte_prefetch0(sas[i]);
rte_prefetch0(pkts[i]);
@@ -117,8 +123,6 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
sa = sas[i];
priv->sa = sa;
- IPSEC_ASSERT(sa != NULL);
-
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
rte_prefetch0(&priv->sym_cop);
@@ -133,17 +137,27 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
rte_crypto_op_attach_sym_session(&priv->cop,
sa->crypto_session);
- ret = sa->pre_crypto(pkts[i], sa, &priv->cop);
+ ret = xform_func(pkts[i], sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
- IPSEC_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
+ RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
}
+}
+
+static inline int
+ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ struct rte_mbuf *pkts[], uint16_t max_pkts)
+{
+ int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
+ struct ipsec_mbuf_metadata *priv;
+ struct rte_crypto_op *cops[max_pkts];
+ struct ipsec_sa *sa;
+ struct rte_mbuf *pkt;
- nb_pkts = 0;
for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
struct cdev_qp *cqp;
@@ -166,9 +180,9 @@ ipsec_processing(struct ipsec_ctx *ipsec_ctx, struct rte_mbuf *pkts[],
priv = get_priv(pkt);
sa = priv->sa;
- IPSEC_ASSERT(sa != NULL);
+ RTE_ASSERT(sa != NULL);
- ret = sa->post_crypto(pkt, sa, cops[j]);
+ ret = xform_func(pkt, sa, cops[j]);
if (unlikely(ret))
rte_pktmbuf_free(pkt);
else
@@ -188,7 +202,9 @@ ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);
- return ipsec_processing(ctx, pkts, sas, nb_pkts, len);
+ ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
+
+ return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}
uint16_t
@@ -199,5 +215,7 @@ ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);
- return ipsec_processing(ctx, pkts, sas, nb_pkts, len);
+ ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
+
+ return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}
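
The refactor above splits the old ipsec_processing() into an enqueue half (attach the session, run the pre-crypto transform, push ops to the crypto PMD) and a dequeue half (pop completed ops, run the post-crypto transform), so the two halves can be driven independently. The underlying burst pattern, as a sketch against an already-configured device/queue pair:

	#include <rte_cryptodev.h>

	/* Sketch of the pattern behind ipsec_enqueue()/ipsec_dequeue():
	 * submit a burst, then poll the same queue pair for completions.
	 * In the real app the two halves run at different times. */
	static uint16_t
	crypto_roundtrip(uint8_t dev_id, uint16_t qp,
			struct rte_crypto_op **ops, uint16_t n)
	{
		uint16_t sent, done;

		sent = rte_cryptodev_enqueue_burst(dev_id, qp, ops, n);
		/* ops[sent..n-1] were not accepted and stay with the caller */
		done = rte_cryptodev_dequeue_burst(dev_id, qp, ops, sent);
		return done;
	}
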
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index a13fdef9..0d2ee254 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -37,7 +37,6 @@
#include <stdint.h>
#include <rte_byteorder.h>
-#include <rte_ip.h>
#include <rte_crypto.h>
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
@@ -47,30 +46,18 @@
#define MAX_PKT_BURST 32
#define MAX_QP_PER_LCORE 256
-#ifdef IPSEC_DEBUG
-#define IPSEC_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
-#define IPSEC_LOG RTE_LOG
-#else
-#define IPSEC_ASSERT(exp) do {} while (0)
-#define IPSEC_LOG(...) do {} while (0)
-#endif /* IPSEC_DEBUG */
-
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
#define uint32_t_to_char(ip, a, b, c, d) do {\
- *a = (unsigned char)(ip >> 24 & 0xff);\
- *b = (unsigned char)(ip >> 16 & 0xff);\
- *c = (unsigned char)(ip >> 8 & 0xff);\
- *d = (unsigned char)(ip & 0xff);\
+ *a = (uint8_t)(ip >> 24 & 0xff);\
+ *b = (uint8_t)(ip >> 16 & 0xff);\
+ *c = (uint8_t)(ip >> 8 & 0xff);\
+ *d = (uint8_t)(ip & 0xff);\
} while (0)
#define DEFAULT_MAX_CATEGORIES 1
-#define IPSEC_SA_MAX_ENTRIES (64) /* must be power of 2, max 2 power 30 */
+#define IPSEC_SA_MAX_ENTRIES (128) /* must be power of 2, max 2 power 30 */
#define SPI2IDX(spi) (spi & (IPSEC_SA_MAX_ENTRIES - 1))
#define INVALID_SPI (0)
@@ -81,6 +68,8 @@ if (!(exp)) { \
#define IPSEC_XFORM_MAX 2
+#define IP6_VERSION (6)
+
struct rte_crypto_xform;
struct ipsec_xform;
struct rte_cryptodev_session;
@@ -88,25 +77,36 @@ struct rte_mbuf;
struct ipsec_sa;
-typedef int (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa,
+typedef int32_t (*ipsec_xform_fn)(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_op *cop);
+struct ip_addr {
+ union {
+ uint32_t ip4;
+ union {
+ uint64_t ip6[2];
+ uint8_t ip6_b[16];
+ };
+ };
+};
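
The nested anonymous unions above give three views of one address slot: .ip4 aliases the first four bytes, while .ip6 and .ip6_b expose the full 128 bits word-wise and byte-wise. A small standalone illustration (plain C11, not patch code):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	struct ip_addr {
		union {
			uint32_t ip4;
			union {
				uint64_t ip6[2];
				uint8_t ip6_b[16];
			};
		};
	};

	int main(void)
	{
		struct ip_addr a = { .ip6_b = { 0x11, 0x22, 0x33, 0x44 } };
		struct ip_addr b = a;

		/* byte-wise and word-wise views cover the same 16 bytes */
		printf("%d\n", memcmp(a.ip6_b, b.ip6_b, 16) == 0);
		printf("%d\n", a.ip6[0] == b.ip6[0] && a.ip6[1] == b.ip6[1]);
		return 0;
	}
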
+
struct ipsec_sa {
uint32_t spi;
uint32_t cdev_id_qp;
- uint32_t src;
- uint32_t dst;
struct rte_cryptodev_sym_session *crypto_session;
- struct rte_crypto_sym_xform *xforms;
- ipsec_xform_fn pre_crypto;
- ipsec_xform_fn post_crypto;
+ uint32_t seq;
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
uint16_t digest_len;
uint16_t iv_len;
uint16_t block_size;
uint16_t flags;
- uint32_t seq;
+#define IP4_TUNNEL (1 << 0)
+#define IP6_TUNNEL (1 << 1)
+#define TRANSPORT (1 << 2)
+ struct ip_addr src;
+ struct ip_addr dst;
+ struct rte_crypto_sym_xform *xforms;
} __rte_cache_aligned;
struct ipsec_mbuf_metadata {
@@ -125,7 +125,8 @@ struct cdev_qp {
struct ipsec_ctx {
struct rte_hash *cdev_map;
- struct sp_ctx *sp_ctx;
+ struct sp_ctx *sp4_ctx;
+ struct sp_ctx *sp6_ctx;
struct sa_ctx *sa_ctx;
uint16_t nb_qps;
uint16_t last_qp;
@@ -139,11 +140,14 @@ struct cdev_key {
};
struct socket_ctx {
- struct sa_ctx *sa_ipv4_in;
- struct sa_ctx *sa_ipv4_out;
- struct sp_ctx *sp_ipv4_in;
- struct sp_ctx *sp_ipv4_out;
- struct rt_ctx *rt_ipv4;
+ struct sa_ctx *sa_in;
+ struct sa_ctx *sa_out;
+ struct sp_ctx *sp_ip4_in;
+ struct sp_ctx *sp_ip4_out;
+ struct sp_ctx *sp_ip6_in;
+ struct sp_ctx *sp_ip6_out;
+ struct rt_ctx *rt_ip4;
+ struct rt_ctx *rt_ip6;
struct rte_mempool *mbuf_pool;
};
@@ -179,12 +183,15 @@ outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
struct ipsec_sa *sa[], uint16_t nb_pkts);
void
-sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
+
+void
+sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
void
-sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
void
-rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep);
+rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep);
#endif /* __IPSEC_H__ */
diff --git a/examples/ipsec-secgw/rt.c b/examples/ipsec-secgw/rt.c
index a6d0866a..fa5f0420 100644
--- a/examples/ipsec-secgw/rt.c
+++ b/examples/ipsec-secgw/rt.c
@@ -36,110 +36,237 @@
*/
#include <sys/types.h>
#include <rte_lpm.h>
+#include <rte_lpm6.h>
#include <rte_errno.h>
+#include <rte_ip.h>
#include "ipsec.h"
-#define RT_IPV4_MAX_RULES 64
+#define RT_IPV4_MAX_RULES 1024
+#define RT_IPV6_MAX_RULES 1024
-struct ipv4_route {
+struct ip4_route {
uint32_t ip;
- uint8_t depth;
- uint8_t if_out;
+ uint8_t depth;
+ uint8_t if_out;
};
-/* In the default routing table we have:
- * ep0 protected ports 0 and 1, and unprotected ports 2 and 3.
- */
-static struct ipv4_route rt_ipv4_ep0[] = {
+struct ip6_route {
+ uint8_t ip[16];
+ uint8_t depth;
+ uint8_t if_out;
+};
+
+static struct ip4_route rt_ip4_ep0[] = {
+ /* Outbound */
+ /* Tunnels */
{ IPv4(172, 16, 2, 5), 32, 0 },
- { IPv4(172, 16, 2, 6), 32, 0 },
- { IPv4(172, 16, 2, 7), 32, 1 },
- { IPv4(172, 16, 2, 8), 32, 1 },
+ { IPv4(172, 16, 2, 6), 32, 1 },
+ /* Transport */
+ { IPv4(192, 168, 175, 0), 24, 0 },
+ { IPv4(192, 168, 176, 0), 24, 1 },
+ /* Bypass */
+ { IPv4(192, 168, 240, 0), 24, 0 },
+ { IPv4(192, 168, 241, 0), 24, 1 },
+ /* Inbound */
+ /* Tunnels */
{ IPv4(192, 168, 115, 0), 24, 2 },
- { IPv4(192, 168, 116, 0), 24, 2 },
- { IPv4(192, 168, 117, 0), 24, 3 },
- { IPv4(192, 168, 118, 0), 24, 3 },
-
+ { IPv4(192, 168, 116, 0), 24, 3 },
+ { IPv4(192, 168, 65, 0), 24, 2 },
+ { IPv4(192, 168, 66, 0), 24, 3 },
+ /* Transport */
+ { IPv4(192, 168, 185, 0), 24, 2 },
+ { IPv4(192, 168, 186, 0), 24, 3 },
+ /* NULL */
{ IPv4(192, 168, 210, 0), 24, 2 },
+ { IPv4(192, 168, 211, 0), 24, 3 },
+ /* Bypass */
+ { IPv4(192, 168, 245, 0), 24, 2 },
+ { IPv4(192, 168, 246, 0), 24, 3 },
+};
- { IPv4(192, 168, 240, 0), 24, 2 },
- { IPv4(192, 168, 250, 0), 24, 0 }
+static struct ip6_route rt_ip6_ep0[] = {
+ /* Outbound */
+ /* Tunnels */
+ { { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 }, 116, 0 },
+ { { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 }, 116, 1 },
+ /* Transport */
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 0 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 1 },
+ /* Inbound */
+ /* Tunnels */
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb,
+ 0xbb, 0xbb, 0xbb, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66,
+ 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ /* Transport */
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
};
-/* In the default routing table we have:
- * ep1 protected ports 0 and 1, and unprotected ports 2 and 3.
- */
-static struct ipv4_route rt_ipv4_ep1[] = {
- { IPv4(172, 16, 1, 5), 32, 2 },
- { IPv4(172, 16, 1, 6), 32, 2 },
- { IPv4(172, 16, 1, 7), 32, 3 },
- { IPv4(172, 16, 1, 8), 32, 3 },
+static struct ip4_route rt_ip4_ep1[] = {
+ /* Outbound */
+ /* Tunnels */
+ { IPv4(172, 16, 1, 5), 32, 0 },
+ { IPv4(172, 16, 1, 6), 32, 1 },
+ /* Transport */
+ { IPv4(192, 168, 185, 0), 24, 0 },
+ { IPv4(192, 168, 186, 0), 24, 1 },
+ /* Bypass */
+ { IPv4(192, 168, 245, 0), 24, 0 },
+ { IPv4(192, 168, 246, 0), 24, 1 },
- { IPv4(192, 168, 105, 0), 24, 0 },
- { IPv4(192, 168, 106, 0), 24, 0 },
- { IPv4(192, 168, 107, 0), 24, 1 },
- { IPv4(192, 168, 108, 0), 24, 1 },
+ /* Inbound */
+ /* Tunnels */
+ { IPv4(192, 168, 105, 0), 24, 2 },
+ { IPv4(192, 168, 106, 0), 24, 3 },
+ { IPv4(192, 168, 55, 0), 24, 2 },
+ { IPv4(192, 168, 56, 0), 24, 3 },
+ /* Transport */
+ { IPv4(192, 168, 175, 0), 24, 2 },
+ { IPv4(192, 168, 176, 0), 24, 3 },
+ /* NULL */
+ { IPv4(192, 168, 200, 0), 24, 2 },
+ { IPv4(192, 168, 201, 0), 24, 3 },
+ /* Bypass */
+ { IPv4(192, 168, 240, 0), 24, 2 },
+ { IPv4(192, 168, 241, 0), 24, 3 },
+};
- { IPv4(192, 168, 200, 0), 24, 0 },
+static struct ip6_route rt_ip6_ep1[] = {
+ /* Outbound */
+ /* Tunnels */
+ { { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 }, 116, 0 },
+ { { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 }, 116, 1 },
+ /* Transport */
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 0 },
+ { { 0xff, 0xff, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 1 },
- { IPv4(192, 168, 240, 0), 24, 2 },
- { IPv4(192, 168, 250, 0), 24, 0 }
+ /* Inbound */
+ /* Tunnels */
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb,
+ 0xbb, 0xbb, 0xbb, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x55, 0x55, 0x55, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66,
+ 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
+ /* Transport */
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, 116, 2 },
+ { { 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00 }, 116, 3 },
};
void
-rt_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+rt_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
char name[PATH_MAX];
- unsigned i;
- int ret;
+ uint32_t i;
+ int32_t ret;
struct rte_lpm *lpm;
- struct ipv4_route *rt;
+ struct rte_lpm6 *lpm6;
+ struct ip4_route *rt;
+ struct ip6_route *rt6;
char a, b, c, d;
- unsigned nb_routes;
+ uint32_t nb_routes, nb_routes6;
struct rte_lpm_config conf = { 0 };
+ struct rte_lpm6_config conf6 = { 0 };
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->rt_ipv4 != NULL)
- rte_exit(EXIT_FAILURE, "Routing Table for socket %u already "
- "initialized\n", socket_id);
+ if (ctx->rt_ip4 != NULL)
+ rte_exit(EXIT_FAILURE, "IPv4 Routing Table for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ctx->rt_ip6 != NULL)
+ rte_exit(EXIT_FAILURE, "IPv6 Routing Table for socket %u "
+ "already initialized\n", socket_id);
- printf("Creating Routing Table (RT) context with %u max routes\n",
+ printf("Creating IPv4 Routing Table (RT) context with %u max routes\n",
RT_IPV4_MAX_RULES);
if (ep == 0) {
- rt = rt_ipv4_ep0;
- nb_routes = RTE_DIM(rt_ipv4_ep0);
+ rt = rt_ip4_ep0;
+ nb_routes = RTE_DIM(rt_ip4_ep0);
+ rt6 = rt_ip6_ep0;
+ nb_routes6 = RTE_DIM(rt_ip6_ep0);
} else if (ep == 1) {
- rt = rt_ipv4_ep1;
- nb_routes = RTE_DIM(rt_ipv4_ep1);
+ rt = rt_ip4_ep1;
+ nb_routes = RTE_DIM(rt_ip4_ep1);
+ rt6 = rt_ip6_ep1;
+ nb_routes6 = RTE_DIM(rt_ip6_ep1);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. Only 0 or 1 "
"supported.\n", ep);
/* create the LPM table */
- snprintf(name, sizeof(name), "%s_%u", "rt_ipv4", socket_id);
+ snprintf(name, sizeof(name), "%s_%u", "rt_ip4", socket_id);
conf.max_rules = RT_IPV4_MAX_RULES;
conf.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES;
lpm = rte_lpm_create(name, socket_id, &conf);
if (lpm == NULL)
- rte_exit(EXIT_FAILURE, "Unable to create LPM table "
- "on socket %d\n", socket_id);
+ rte_exit(EXIT_FAILURE, "Unable to create %s LPM table "
+ "on socket %d\n", name, socket_id);
/* populate the LPM table */
for (i = 0; i < nb_routes; i++) {
ret = rte_lpm_add(lpm, rt[i].ip, rt[i].depth, rt[i].if_out);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Unable to add entry num %u to "
- "LPM table on socket %d\n", i, socket_id);
+ rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s "
+ "LPM table on socket %d\n", i, name, socket_id);
uint32_t_to_char(rt[i].ip, &a, &b, &c, &d);
printf("LPM: Adding route %hhu.%hhu.%hhu.%hhu/%hhu (%hhu)\n",
a, b, c, d, rt[i].depth, rt[i].if_out);
}
- ctx->rt_ipv4 = (struct rt_ctx *)lpm;
+ snprintf(name, sizeof(name), "%s_%u", "rt_ip6", socket_id);
+ conf6.max_rules = RT_IPV6_MAX_RULES;
+ conf6.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES;
+ lpm6 = rte_lpm6_create(name, socket_id, &conf6);
+ if (lpm6 == NULL)
+ rte_exit(EXIT_FAILURE, "Unable to create %s LPM table "
+ "on socket %d\n", name, socket_id);
+
+ /* populate the LPM table */
+ for (i = 0; i < nb_routes6; i++) {
+ ret = rte_lpm6_add(lpm6, rt6[i].ip, rt6[i].depth,
+ rt6[i].if_out);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Fail to add entry num %u to %s "
+ "LPM table on socket %d\n", i, name, socket_id);
+
+ printf("LPM6: Adding route "
+ " %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx/%hhx (%hhx)\n",
+ (uint16_t)((rt6[i].ip[0] << 8) | rt6[i].ip[1]),
+ (uint16_t)((rt6[i].ip[2] << 8) | rt6[i].ip[3]),
+ (uint16_t)((rt6[i].ip[4] << 8) | rt6[i].ip[5]),
+ (uint16_t)((rt6[i].ip[6] << 8) | rt6[i].ip[7]),
+ (uint16_t)((rt6[i].ip[8] << 8) | rt6[i].ip[9]),
+ (uint16_t)((rt6[i].ip[10] << 8) | rt6[i].ip[11]),
+ (uint16_t)((rt6[i].ip[12] << 8) | rt6[i].ip[13]),
+ (uint16_t)((rt6[i].ip[14] << 8) | rt6[i].ip[15]),
+ rt6[i].depth, rt6[i].if_out);
+ }
+
+ ctx->rt_ip4 = (struct rt_ctx *)lpm;
+ ctx->rt_ip6 = (struct rt_ctx *)lpm6;
}
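
For IPv4 the same create/add/lookup cycle can be exercised on its own; a minimal sketch assuming EAL is initialized (name and route are illustrative):

	#include <rte_ip.h>
	#include <rte_lpm.h>

	static uint32_t
	lpm4_demo(void)
	{
		struct rte_lpm_config cfg = {
			.max_rules = 1024,
			.number_tbl8s = RTE_LPM_TBL8_NUM_ENTRIES,
		};
		struct rte_lpm *lpm = rte_lpm_create("demo_rt_ip4", 0, &cfg);
		uint32_t hop = 0;

		if (lpm == NULL)
			return 0;
		/* host-order address, as rt_init() stores them */
		rte_lpm_add(lpm, IPv4(172, 16, 2, 5), 32, 7);
		if (rte_lpm_lookup(lpm, IPv4(172, 16, 2, 5), &hop) == 0)
			;	/* hop == 7, the next hop stored above */
		rte_lpm_free(lpm);
		return hop;
	}
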
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index b6260ede..ab18b811 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -37,170 +37,200 @@
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
+#include <rte_ip.h>
#include "ipsec.h"
#include "esp.h"
-/* SAs EP0 Outbound */
-const struct ipsec_sa sa_ep0_out[] = {
- { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
+/* SAs Outbound */
+const struct ipsec_sa sa_out[] = {
+ {
+ .spi = 5,
+ .src.ip4 = IPv4(172, 16, 1, 5),
+ .dst.ip4 = IPv4(172, 16, 2, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 6,
+ .src.ip4 = IPv4(172, 16, 1, 6),
+ .dst.ip4 = IPv4(172, 16, 2, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 10,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 11,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 15,
+ .src.ip4 = IPv4(172, 16, 1, 5),
+ .dst.ip4 = IPv4(172, 16, 2, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 16,
+ .src.ip4 = IPv4(172, 16, 1, 6),
+ .dst.ip4 = IPv4(172, 16, 2, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 25,
+ .src.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 },
+ .dst.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
+ {
+ .spi = 26,
+ .src.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 },
+ .dst.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
};
-/* SAs EP0 Inbound */
-const struct ipsec_sa sa_ep0_in[] = {
- { 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
-};
-
-/* SAs EP1 Outbound */
-const struct ipsec_sa sa_ep1_out[] = {
- { 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
- NULL, NULL,
- esp4_tunnel_outbound_pre_crypto,
- esp4_tunnel_outbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
-};
-
-/* SAs EP1 Inbound */
-const struct ipsec_sa sa_ep1_in[] = {
- { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
- 12, 16, 16,
- 0, 0 },
- { 9, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
- NULL, NULL,
- esp4_tunnel_inbound_pre_crypto,
- esp4_tunnel_inbound_post_crypto,
- RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
- 0, 0, 4,
- 0, 0 },
+/* SAs Inbound */
+const struct ipsec_sa sa_in[] = {
+ {
+ .spi = 105,
+ .src.ip4 = IPv4(172, 16, 2, 5),
+ .dst.ip4 = IPv4(172, 16, 1, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 106,
+ .src.ip4 = IPv4(172, 16, 2, 6),
+ .dst.ip4 = IPv4(172, 16, 1, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 110,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 111,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = TRANSPORT
+ },
+ {
+ .spi = 115,
+ .src.ip4 = IPv4(172, 16, 2, 5),
+ .dst.ip4 = IPv4(172, 16, 1, 5),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 116,
+ .src.ip4 = IPv4(172, 16, 2, 6),
+ .dst.ip4 = IPv4(172, 16, 1, 6),
+ .cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL,
+ .digest_len = 0,
+ .iv_len = 0,
+ .block_size = 4,
+ .flags = IP4_TUNNEL
+ },
+ {
+ .spi = 125,
+ .src.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x55, 0x55 },
+ .dst.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x55, 0x55 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
+ {
+ .spi = 126,
+ .src.ip6_b = { 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x66, 0x66 },
+ .dst.ip6_b = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x66, 0x66 },
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .digest_len = 12,
+ .iv_len = 16,
+ .block_size = 16,
+ .flags = IP6_TUNNEL
+ },
};
static uint8_t cipher_key[256] = "sixteenbytes key";
@@ -265,11 +295,11 @@ struct sa_ctx {
};
static struct sa_ctx *
-sa_ipv4_create(const char *name, int socket_id)
+sa_create(const char *name, int32_t socket_id)
{
char s[PATH_MAX];
struct sa_ctx *sa_ctx;
- unsigned mz_size;
+ uint32_t mz_size;
const struct rte_memzone *mz;
snprintf(s, sizeof(s), "%s_%u", name, socket_id);
@@ -294,10 +324,10 @@ sa_ipv4_create(const char *name, int socket_id)
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries, unsigned inbound)
+ uint32_t nb_entries, uint32_t inbound)
{
struct ipsec_sa *sa;
- unsigned i, idx;
+ uint32_t i, idx;
for (i = 0; i < nb_entries; i++) {
idx = SPI2IDX(entries[i].spi);
@@ -308,8 +338,14 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
return -EINVAL;
}
*sa = entries[i];
- sa->src = rte_cpu_to_be_32(sa->src);
- sa->dst = rte_cpu_to_be_32(sa->dst);
+ sa->seq = 0;
+
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ sa->src.ip4 = rte_cpu_to_be_32(sa->src.ip4);
+ sa->dst.ip4 = rte_cpu_to_be_32(sa->dst.ip4);
+ }
+
if (inbound) {
if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
sa_ctx->xf[idx].a = null_auth_xf;
@@ -337,65 +373,65 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries)
+ uint32_t nb_entries)
{
return sa_add_rules(sa_ctx, entries, nb_entries, 0);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
- unsigned nb_entries)
+ uint32_t nb_entries)
{
return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}
void
-sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+sa_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
const struct ipsec_sa *sa_out_entries, *sa_in_entries;
- unsigned nb_out_entries, nb_in_entries;
+ uint32_t nb_out_entries, nb_in_entries;
const char *name;
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->sa_ipv4_in != NULL)
+ if (ctx->sa_in != NULL)
rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
"initialized\n", socket_id);
- if (ctx->sa_ipv4_out != NULL)
+ if (ctx->sa_out != NULL)
rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
"initialized\n", socket_id);
if (ep == 0) {
- sa_out_entries = sa_ep0_out;
- nb_out_entries = RTE_DIM(sa_ep0_out);
- sa_in_entries = sa_ep0_in;
- nb_in_entries = RTE_DIM(sa_ep0_in);
+ sa_out_entries = sa_out;
+ nb_out_entries = RTE_DIM(sa_out);
+ sa_in_entries = sa_in;
+ nb_in_entries = RTE_DIM(sa_in);
} else if (ep == 1) {
- sa_out_entries = sa_ep1_out;
- nb_out_entries = RTE_DIM(sa_ep1_out);
- sa_in_entries = sa_ep1_in;
- nb_in_entries = RTE_DIM(sa_ep1_in);
+ sa_out_entries = sa_in;
+ nb_out_entries = RTE_DIM(sa_in);
+ sa_in_entries = sa_out;
+ nb_in_entries = RTE_DIM(sa_out);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
"Only 0 or 1 supported.\n", ep);
- name = "sa_ipv4_in";
- ctx->sa_ipv4_in = sa_ipv4_create(name, socket_id);
- if (ctx->sa_ipv4_in == NULL)
+ name = "sa_in";
+ ctx->sa_in = sa_create(name, socket_id);
+ if (ctx->sa_in == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
"in socket %d\n", rte_errno, name, socket_id);
- name = "sa_ipv4_out";
- ctx->sa_ipv4_out = sa_ipv4_create(name, socket_id);
- if (ctx->sa_ipv4_out == NULL)
+ name = "sa_out";
+ ctx->sa_out = sa_create(name, socket_id);
+ if (ctx->sa_out == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
"in socket %d\n", rte_errno, name, socket_id);
- sa_in_add_rules(ctx->sa_ipv4_in, sa_in_entries, nb_in_entries);
+ sa_in_add_rules(ctx->sa_in, sa_in_entries, nb_in_entries);
- sa_out_add_rules(ctx->sa_ipv4_out, sa_out_entries, nb_out_entries);
+ sa_out_add_rules(ctx->sa_out, sa_out_entries, nb_out_entries);
}
int
@@ -408,38 +444,66 @@ inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
}
+static inline void
+single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
+ struct ipsec_sa **sa_ret)
+{
+ struct esp_hdr *esp;
+ struct ip *ip;
+ uint32_t *src4_addr;
+ uint8_t *src6_addr;
+ struct ipsec_sa *sa;
+
+ *sa_ret = NULL;
+
+ ip = rte_pktmbuf_mtod(pkt, struct ip *);
+ if (ip->ip_v == IPVERSION)
+ esp = (struct esp_hdr *)(ip + 1);
+ else
+ esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);
+
+ if (esp->spi == INVALID_SPI)
+ return;
+
+ sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
+ if (rte_be_to_cpu_32(esp->spi) != sa->spi)
+ return;
+
+ switch (sa->flags) {
+ case IP4_TUNNEL:
+ src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
+ if ((ip->ip_v == IPVERSION) &&
+ (sa->src.ip4 == *src4_addr) &&
+ (sa->dst.ip4 == *(src4_addr + 1)))
+ *sa_ret = sa;
+ break;
+ case IP6_TUNNEL:
+ src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
+ if ((ip->ip_v == IP6_VERSION) &&
+ !memcmp(&sa->src.ip6, src6_addr, 16) &&
+ !memcmp(&sa->dst.ip6, src6_addr + 16, 16))
+ *sa_ret = sa;
+ break;
+ case TRANSPORT:
+ *sa_ret = sa;
+ }
+}
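
single_inbound_lookup() is a direct-indexed table probe with verification: SPI2IDX() truncates the SPI to the table size, and the stored SPI is compared back so an empty slot or a truncation collision reads as "no SA" rather than the wrong SA. The bare mechanism as a compile-alone sketch:

	#include <stdint.h>
	#include <stddef.h>

	#define IPSEC_SA_MAX_ENTRIES 128	/* power of two, as in ipsec.h */
	#define SPI2IDX(spi) ((spi) & (IPSEC_SA_MAX_ENTRIES - 1))

	struct sa_entry { uint32_t spi; };

	/* Return the SA only if the indexed slot really holds this SPI. */
	static struct sa_entry *
	sa_by_spi(struct sa_entry sadb[IPSEC_SA_MAX_ENTRIES], uint32_t spi)
	{
		struct sa_entry *sa = &sadb[SPI2IDX(spi)];

		return (sa->spi == spi) ? sa : NULL;
	}
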
+
void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
struct ipsec_sa *sa[], uint16_t nb_pkts)
{
- unsigned i;
- uint32_t *src, spi;
-
- for (i = 0; i < nb_pkts; i++) {
- spi = rte_pktmbuf_mtod_offset(pkts[i], struct esp_hdr *,
- sizeof(struct ip))->spi;
-
- if (spi == INVALID_SPI)
- continue;
+ uint32_t i;
- sa[i] = &sa_ctx->sa[SPI2IDX(spi)];
- if (spi != sa[i]->spi) {
- sa[i] = NULL;
- continue;
- }
-
- src = rte_pktmbuf_mtod_offset(pkts[i], uint32_t *,
- offsetof(struct ip, ip_src));
- if ((sa[i]->src != *src) || (sa[i]->dst != *(src + 1)))
- sa[i] = NULL;
- }
+ for (i = 0; i < nb_pkts; i++)
+ single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}
void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
struct ipsec_sa *sa[], uint16_t nb_pkts)
{
- unsigned i;
+ uint32_t i;
for (i = 0; i < nb_pkts; i++)
sa[i] = &sa_ctx->sa[sa_idx[i]];
diff --git a/examples/ipsec-secgw/sp.c b/examples/ipsec-secgw/sp4.c
index 4f167301..9c4b256b 100644
--- a/examples/ipsec-secgw/sp.c
+++ b/examples/ipsec-secgw/sp4.c
@@ -39,6 +39,7 @@
#include <netinet/ip.h>
#include <rte_acl.h>
+#include <rte_ip.h>
#include "ipsec.h"
@@ -71,7 +72,7 @@ enum {
RTE_ACL_IPV4_NUM
};
-struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
+struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = {
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
@@ -110,9 +111,9 @@ struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
},
};
-RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ipv4_defs));
+RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ip4_defs));
-const struct acl4_rules acl4_rules_in[] = {
+const struct acl4_rules acl4_rules_out[] = {
{
.data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
/* destination IPv4 */
@@ -124,7 +125,7 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2},
+ .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 106, 0),
.mask_range.u32 = 24,},
@@ -134,9 +135,9 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(7), .category_mask = 1, .priority = 3},
+ .data = {.userdata = PROTECT(10), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 107, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 175, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -144,9 +145,9 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(8), .category_mask = 1, .priority = 4},
+ .data = {.userdata = PROTECT(11), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 108, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 176, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -154,7 +155,7 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(9), .category_mask = 1, .priority = 5},
+ .data = {.userdata = PROTECT(15), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 200, 0),
.mask_range.u32 = 24,},
@@ -164,9 +165,49 @@ const struct acl4_rules acl4_rules_in[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = BYPASS, .category_mask = 1, .priority = 6},
+ .data = {.userdata = PROTECT(16), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 250, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 201, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(25), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 55, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(26), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 56, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 240, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 241, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -175,9 +216,9 @@ const struct acl4_rules acl4_rules_in[] = {
}
};
-const struct acl4_rules acl4_rules_out[] = {
+const struct acl4_rules acl4_rules_in[] = {
{
- .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
+ .data = {.userdata = PROTECT(105), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 115, 0),
.mask_range.u32 = 24,},
@@ -187,7 +228,7 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 2},
+ .data = {.userdata = PROTECT(106), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 116, 0),
.mask_range.u32 = 24,},
@@ -197,9 +238,9 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(7), .category_mask = 1, .priority = 3},
+ .data = {.userdata = PROTECT(110), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 117, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 185, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -207,9 +248,9 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(8), .category_mask = 1, .priority = 4},
+ .data = {.userdata = PROTECT(111), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 118, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 186, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -217,7 +258,7 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = PROTECT(9), .category_mask = 1, .priority = 5},
+ .data = {.userdata = PROTECT(115), .category_mask = 1, .priority = 1},
/* destination IPv4 */
.field[2] = {.value.u32 = IPv4(192, 168, 210, 0),
.mask_range.u32 = 24,},
@@ -227,9 +268,49 @@ const struct acl4_rules acl4_rules_out[] = {
.field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
},
{
- .data = {.userdata = BYPASS, .category_mask = 1, .priority = 6},
+ .data = {.userdata = PROTECT(116), .category_mask = 1, .priority = 1},
/* destination IPv4 */
- .field[2] = {.value.u32 = IPv4(192, 168, 240, 0),
+ .field[2] = {.value.u32 = IPv4(192, 168, 211, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(125), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 65, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(126), .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 66, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 245, 0),
+ .mask_range.u32 = 24,},
+ /* source port */
+ .field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[4] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = BYPASS, .category_mask = 1, .priority = 1},
+ /* destination IPv4 */
+ .field[2] = {.value.u32 = IPv4(192, 168, 246, 0),
.mask_range.u32 = 24,},
/* source port */
.field[3] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
@@ -239,9 +320,9 @@ const struct acl4_rules acl4_rules_out[] = {
};
static void
-print_one_ipv4_rule(const struct acl4_rules *rule, int extra)
+print_one_ip4_rule(const struct acl4_rules *rule, int32_t extra)
{
- unsigned char a, b, c, d;
+ uint8_t a, b, c, d;
uint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32,
&a, &b, &c, &d);
@@ -266,20 +347,20 @@ print_one_ipv4_rule(const struct acl4_rules *rule, int extra)
}
static inline void
-dump_ipv4_rules(const struct acl4_rules *rule, int num, int extra)
+dump_ip4_rules(const struct acl4_rules *rule, int32_t num, int32_t extra)
{
- int i;
+ int32_t i;
for (i = 0; i < num; i++, rule++) {
printf("\t%d:", i + 1);
- print_one_ipv4_rule(rule, extra);
+ print_one_ip4_rule(rule, extra);
printf("\n");
}
}
static struct rte_acl_ctx *
-acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
- unsigned rules_nb)
+acl4_init(const char *name, int32_t socketid, const struct acl4_rules *rules,
+ uint32_t rules_nb)
{
char s[PATH_MAX];
struct rte_acl_param acl_param;
@@ -294,11 +375,11 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
snprintf(s, sizeof(s), "%s_%d", name, socketid);
printf("IPv4 %s entries [%u]:\n", s, rules_nb);
- dump_ipv4_rules(rules, rules_nb, 1);
+ dump_ip4_rules(rules, rules_nb, 1);
acl_param.name = s;
acl_param.socket_id = socketid;
- acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ipv4_defs));
+ acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip4_defs));
acl_param.max_rule_num = MAX_ACL_RULE_NUM;
ctx = rte_acl_create(&acl_param);
@@ -313,8 +394,8 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
memset(&acl_build_param, 0, sizeof(acl_build_param));
acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
- acl_build_param.num_fields = RTE_DIM(ipv4_defs);
- memcpy(&acl_build_param.defs, ipv4_defs, sizeof(ipv4_defs));
+ acl_build_param.num_fields = RTE_DIM(ip4_defs);
+ memcpy(&acl_build_param.defs, ip4_defs, sizeof(ip4_defs));
if (rte_acl_build(ctx, &acl_build_param) != 0)
rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n");
@@ -325,42 +406,42 @@ acl4_init(const char *name, int socketid, const struct acl4_rules *rules,
}
void
-sp_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
+sp4_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
{
const char *name;
const struct acl4_rules *rules_out, *rules_in;
- unsigned nb_out_rules, nb_in_rules;
+ uint32_t nb_out_rules, nb_in_rules;
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
- if (ctx->sp_ipv4_in != NULL)
+ if (ctx->sp_ip4_in != NULL)
rte_exit(EXIT_FAILURE, "Inbound SP DB for socket %u already "
"initialized\n", socket_id);
- if (ctx->sp_ipv4_out != NULL)
+ if (ctx->sp_ip4_out != NULL)
rte_exit(EXIT_FAILURE, "Outbound SP DB for socket %u already "
"initialized\n", socket_id);
if (ep == 0) {
- rules_out = acl4_rules_in;
- nb_out_rules = RTE_DIM(acl4_rules_in);
- rules_in = acl4_rules_out;
- nb_in_rules = RTE_DIM(acl4_rules_out);
- } else if (ep == 1) {
rules_out = acl4_rules_out;
nb_out_rules = RTE_DIM(acl4_rules_out);
rules_in = acl4_rules_in;
nb_in_rules = RTE_DIM(acl4_rules_in);
+ } else if (ep == 1) {
+ rules_out = acl4_rules_in;
+ nb_out_rules = RTE_DIM(acl4_rules_in);
+ rules_in = acl4_rules_out;
+ nb_in_rules = RTE_DIM(acl4_rules_out);
} else
rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
"Only 0 or 1 supported.\n", ep);
- name = "sp_ipv4_in";
- ctx->sp_ipv4_in = (struct sp_ctx *)acl4_init(name, socket_id,
+ name = "sp_ip4_in";
+ ctx->sp_ip4_in = (struct sp_ctx *)acl4_init(name, socket_id,
rules_in, nb_in_rules);
- name = "sp_ipv4_out";
- ctx->sp_ipv4_out = (struct sp_ctx *)acl4_init(name, socket_id,
+ name = "sp_ip4_out";
+ ctx->sp_ip4_out = (struct sp_ctx *)acl4_init(name, socket_id,
rules_out, nb_out_rules);
}
diff --git a/examples/ipsec-secgw/sp6.c b/examples/ipsec-secgw/sp6.c
new file mode 100644
index 00000000..1dda11a4
--- /dev/null
+++ b/examples/ipsec-secgw/sp6.c
@@ -0,0 +1,448 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Security Policies
+ */
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <netinet/ip6.h>
+
+#include <rte_acl.h>
+#include <rte_ip.h>
+
+#include "ipsec.h"
+
+#define MAX_ACL_RULE_NUM 1000
+
+enum {
+ IP6_PROTO,
+ IP6_SRC0,
+ IP6_SRC1,
+ IP6_SRC2,
+ IP6_SRC3,
+ IP6_DST0,
+ IP6_DST1,
+ IP6_DST2,
+ IP6_DST3,
+ IP6_SRCP,
+ IP6_DSTP,
+ IP6_NUM
+};
+
+#define IP6_ADDR_SIZE 16
+
+struct rte_acl_field_def ip6_defs[IP6_NUM] = {
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = IP6_PROTO,
+ .input_index = IP6_PROTO,
+ .offset = 0,
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC0,
+ .input_index = IP6_SRC0,
+ .offset = 2
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC1,
+ .input_index = IP6_SRC1,
+ .offset = 6
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC2,
+ .input_index = IP6_SRC2,
+ .offset = 10
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_SRC3,
+ .input_index = IP6_SRC3,
+ .offset = 14
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST0,
+ .input_index = IP6_DST0,
+ .offset = 18
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST1,
+ .input_index = IP6_DST1,
+ .offset = 22
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST2,
+ .input_index = IP6_DST2,
+ .offset = 26
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = 4,
+ .field_index = IP6_DST3,
+ .input_index = IP6_DST3,
+ .offset = 30
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = IP6_SRCP,
+ .input_index = IP6_SRCP,
+ .offset = 34
+ },
+ {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = IP6_DSTP,
+ .input_index = IP6_SRCP,
+ .offset = 36
+ }
+};
+
+RTE_ACL_RULE_DEF(acl6_rules, RTE_DIM(ip6_defs));
+
+const struct acl6_rules acl6_rules_out[] = {
+ {
+ .data = {.userdata = PROTECT(5), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x55555555, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(6), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x66666666, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(10), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x00000000, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(11), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(25), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xaaaaaaaa, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(26), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xbbbbbbbb, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ }
+};
+
+const struct acl6_rules acl6_rules_in[] = {
+ {
+ .data = {.userdata = PROTECT(15), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x55555555, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(16), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x66666666, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(110), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x00000000, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(111), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0x11111111, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(125), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xaaaaaaaa, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ },
+ {
+ .data = {.userdata = PROTECT(126), .category_mask = 1, .priority = 1},
+ /* destination IPv6 */
+ .field[5] = {.value.u32 = 0xffff0000, .mask_range.u32 = 32,},
+ .field[6] = {.value.u32 = 0x0, .mask_range.u32 = 32,},
+ .field[7] = {.value.u32 = 0xbbbbbbbb, .mask_range.u32 = 32,},
+ .field[8] = {.value.u32 = 0x0, .mask_range.u32 = 0,},
+ /* source port */
+ .field[9] = {.value.u16 = 0, .mask_range.u16 = 0xffff,},
+ /* destination port */
+ .field[10] = {.value.u16 = 0, .mask_range.u16 = 0xffff,}
+ }
+};
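+
+/*
+ * Note (informational): the PROTECT(n) userdata in the rules above -- the
+ * macro comes from ipsec.h -- marks matching traffic for IPsec processing,
+ * with n selecting the SA entry that handles the flow.
+ */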
+
+static inline void
+print_one_ip6_rule(const struct acl6_rules *rule, int32_t extra)
+{
+ uint8_t a, b, c, d;
+
+ uint32_t_to_char(rule->field[IP6_SRC0].value.u32,
+ &a, &b, &c, &d);
+ printf("%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC1].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC2].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_SRC3].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
+ rule->field[IP6_SRC0].mask_range.u32
+ + rule->field[IP6_SRC1].mask_range.u32
+ + rule->field[IP6_SRC2].mask_range.u32
+ + rule->field[IP6_SRC3].mask_range.u32);
+
+ uint32_t_to_char(rule->field[IP6_DST0].value.u32,
+ &a, &b, &c, &d);
+ printf("%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST1].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST2].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x", a, b, c, d);
+ uint32_t_to_char(rule->field[IP6_DST3].value.u32,
+ &a, &b, &c, &d);
+ printf(":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
+ rule->field[IP6_DST0].mask_range.u32
+ + rule->field[IP6_DST1].mask_range.u32
+ + rule->field[IP6_DST2].mask_range.u32
+ + rule->field[IP6_DST3].mask_range.u32);
+
+ printf("%hu : %hu %hu : %hu 0x%hhx/0x%hhx ",
+ rule->field[IP6_SRCP].value.u16,
+ rule->field[IP6_SRCP].mask_range.u16,
+ rule->field[IP6_DSTP].value.u16,
+ rule->field[IP6_DSTP].mask_range.u16,
+ rule->field[IP6_PROTO].value.u8,
+ rule->field[IP6_PROTO].mask_range.u8);
+ if (extra)
+ printf("0x%x-0x%x-0x%x ",
+ rule->data.category_mask,
+ rule->data.priority,
+ rule->data.userdata);
+}
+
+static inline void
+dump_ip6_rules(const struct acl6_rules *rule, int32_t num, int32_t extra)
+{
+ int32_t i;
+
+ for (i = 0; i < num; i++, rule++) {
+ printf("\t%d:", i + 1);
+ print_one_ip6_rule(rule, extra);
+ printf("\n");
+ }
+}
+
+static struct rte_acl_ctx *
+acl6_init(const char *name, int32_t socketid, const struct acl6_rules *rules,
+ uint32_t rules_nb)
+{
+ char s[PATH_MAX];
+ struct rte_acl_param acl_param;
+ struct rte_acl_config acl_build_param;
+ struct rte_acl_ctx *ctx;
+
+ printf("Creating SP context with %u max rules\n", MAX_ACL_RULE_NUM);
+
+ memset(&acl_param, 0, sizeof(acl_param));
+
+ /* Create ACL contexts */
+ snprintf(s, sizeof(s), "%s_%d", name, socketid);
+
+ printf("IPv4 %s entries [%u]:\n", s, rules_nb);
+ dump_ip6_rules(rules, rules_nb, 1);
+
+ acl_param.name = s;
+ acl_param.socket_id = socketid;
+ acl_param.rule_size = RTE_ACL_RULE_SZ(RTE_DIM(ip6_defs));
+ acl_param.max_rule_num = MAX_ACL_RULE_NUM;
+
+ ctx = rte_acl_create(&acl_param);
+ if (ctx == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to create ACL context\n");
+
+ if (rte_acl_add_rules(ctx, (const struct rte_acl_rule *)rules,
+ rules_nb) < 0)
+ rte_exit(EXIT_FAILURE, "add rules failed\n");
+
+ /* Perform builds */
+ memset(&acl_build_param, 0, sizeof(acl_build_param));
+
+ acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
+ acl_build_param.num_fields = RTE_DIM(ip6_defs);
+ memcpy(&acl_build_param.defs, ip6_defs, sizeof(ip6_defs));
+
+ if (rte_acl_build(ctx, &acl_build_param) != 0)
+ rte_exit(EXIT_FAILURE, "Failed to build ACL trie\n");
+
+ rte_acl_dump(ctx);
+
+ return ctx;
+}
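+
+/*
+ * Usage sketch (illustrative only, not part of this file): the context
+ * built above classifies packets in bursts. Assuming `buf` points at an
+ * ACL input buffer laid out per ip6_defs:
+ *
+ *	const uint8_t *data[1] = { buf };
+ *	uint32_t results[1];
+ *	rte_acl_classify(ctx, data, results, 1, DEFAULT_MAX_CATEGORIES);
+ *	// results[0] is the matched rule's userdata, or 0 on no match
+ */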
+
+void
+sp6_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t ep)
+{
+ const char *name;
+ const struct acl6_rules *rules_out, *rules_in;
+ uint32_t nb_out_rules, nb_in_rules;
+
+ if (ctx == NULL)
+ rte_exit(EXIT_FAILURE, "NULL context.\n");
+
+ if (ctx->sp_ip6_in != NULL)
+ rte_exit(EXIT_FAILURE, "Inbound IPv6 SP DB for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ctx->sp_ip6_out != NULL)
+ rte_exit(EXIT_FAILURE, "Outbound IPv6 SP DB for socket %u "
+ "already initialized\n", socket_id);
+
+ if (ep == 0) {
+ rules_out = acl6_rules_out;
+ nb_out_rules = RTE_DIM(acl6_rules_out);
+ rules_in = acl6_rules_in;
+ nb_in_rules = RTE_DIM(acl6_rules_in);
+ } else if (ep == 1) {
+ rules_out = acl6_rules_in;
+ nb_out_rules = RTE_DIM(acl6_rules_in);
+ rules_in = acl6_rules_out;
+ nb_in_rules = RTE_DIM(acl6_rules_out);
+ } else
+ rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
+ "Only 0 or 1 supported.\n", ep);
+
+ name = "sp_ip6_in";
+ ctx->sp_ip6_in = (struct sp_ctx *)acl6_init(name, socket_id,
+ rules_in, nb_in_rules);
+
+ name = "sp_ip6_out";
+ ctx->sp_ip6_out = (struct sp_ctx *)acl6_init(name, socket_id,
+ rules_out, nb_out_rules);
+}
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 96b41578..f013d927 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -321,7 +321,7 @@ mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
/* Construct Ethernet header. */
ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
- RTE_MBUF_ASSERT(ethdr != NULL);
+ RTE_ASSERT(ethdr != NULL);
ether_addr_copy(dest_addr, &ethdr->d_addr);
ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
@@ -353,7 +353,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
/* Remove the Ethernet header from the input packet */
iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
- RTE_MBUF_ASSERT(iphdr != NULL);
+ RTE_ASSERT(iphdr != NULL);
dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
diff --git a/examples/kni/main.c b/examples/kni/main.c
index a5297f28..f9fc61e0 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -318,8 +318,6 @@ main_loop(__rte_unused void *arg)
};
enum lcore_rxtx flag = LCORE_NONE;
- nb_ports = (uint8_t)(nb_ports < RTE_MAX_ETHPORTS ?
- nb_ports : RTE_MAX_ETHPORTS);
for (i = 0; i < nb_ports; i++) {
if (!kni_port_params_array[i])
continue;
@@ -831,7 +829,8 @@ kni_free_kni(uint8_t port_id)
return -1;
for (i = 0; i < p[port_id]->nb_kni; i++) {
- rte_kni_release(p[port_id]->kni[i]);
+ if (rte_kni_release(p[port_id]->kni[i]))
+ printf("Fail to release kni\n");
p[port_id]->kni[i] = NULL;
}
rte_eth_dev_stop(port_id);
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index d4e2d8de..8dc616d4 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -341,20 +341,25 @@ fill_supported_algorithm_tables(void)
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL");
+ strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC],
+ "AES_XCBC_MAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
+ strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");
for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
strcpy(supported_cipher_algo[i], "NOT_SUPPORTED");
strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CBC], "AES_CBC");
+ strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CTR], "AES_CTR");
strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
+ strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
}
@@ -463,8 +468,9 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
op->sym->auth.digest.length = cparams->digest_length;
- /* For SNOW3G algorithms, offset/length must be in bits */
- if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
+ /* For SNOW3G/KASUMI algorithms, offset/length must be in bits */
+ if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+ cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
op->sym->auth.data.offset = ipdata_offset << 3;
op->sym->auth.data.length = data_len << 3;
} else {
@@ -485,7 +491,8 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
op->sym->cipher.iv.length = cparams->iv.length;
/* For SNOW3G algorithms, offset/length must be in bits */
- if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) {
+ if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+ cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8) {
op->sym->cipher.data.offset = ipdata_offset << 3;
if (cparams->do_hash && cparams->hash_verify)
/* Do not cipher the hash tag */
@@ -1486,6 +1493,15 @@ check_supported_size(uint16_t length, uint16_t min, uint16_t max,
{
uint16_t supp_size;
+ /* Single value */
+ if (increment == 0) {
+ if (length == min)
+ return 0;
+ else
+ return -1;
+ }
+
+ /* Range of values */
for (supp_size = min; supp_size <= max; supp_size += increment) {
if (length == supp_size)
return 0;
@@ -1785,9 +1801,6 @@ initialize_ports(struct l2fwd_crypto_options *options)
return -1;
}
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* Reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
diff --git a/examples/l2fwd-ivshmem/host/host.c b/examples/l2fwd-ivshmem/host/host.c
index 4bd7c41d..cd284b7d 100644
--- a/examples/l2fwd-ivshmem/host/host.c
+++ b/examples/l2fwd-ivshmem/host/host.c
@@ -677,9 +677,6 @@ int main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/*
* reserve memzone to communicate with VMs - we cannot use rte_malloc here
* because while it is technically possible, it is a very bad idea to share
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 9f3a77d2..614ea604 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -811,9 +811,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
@@ -990,7 +987,7 @@ main(int argc, char **argv)
struct rte_jobstats *job = &qconf->port_fwd_jobs[i];
portid = qconf->rx_port_list[i];
- printf("Setting forward jon for port %u\n", portid);
+ printf("Setting forward job for port %u\n", portid);
snprintf(name, RTE_DIM(name), "port %u fwd", portid);
/* Setup forward job.
diff --git a/examples/l2fwd-keepalive/Makefile b/examples/l2fwd-keepalive/Makefile
index 568edcb4..ca45a798 100644
--- a/examples/l2fwd-keepalive/Makefile
+++ b/examples/l2fwd-keepalive/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -42,9 +42,10 @@ include $(RTE_SDK)/mk/rte.vars.mk
APP = l2fwd-keepalive
# all sources are stored in SRCS-y
-SRCS-y := main.c
+SRCS-y := main.c shm.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDFLAGS += -lrt
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/l2fwd-keepalive/ka-agent/Makefile b/examples/l2fwd-keepalive/ka-agent/Makefile
new file mode 100644
index 00000000..fd0c38b4
--- /dev/null
+++ b/examples/l2fwd-keepalive/ka-agent/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = ka-agent
+
+# all sources are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)/../
+LDFLAGS += -lrt
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/l2fwd-keepalive/ka-agent/main.c b/examples/l2fwd-keepalive/ka-agent/main.c
new file mode 100644
index 00000000..be1c7f49
--- /dev/null
+++ b/examples/l2fwd-keepalive/ka-agent/main.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+
+#include <rte_keepalive.h>
+
+#include <shm.h>
+
+#define MAX_TIMEOUTS 4
+#define SEM_TIMEOUT_SECS 2
+
+static struct rte_keepalive_shm *ka_shm_create(void)
+{
+ int fd = shm_open(RTE_KEEPALIVE_SHM_NAME, O_RDWR, 0666);
+ size_t size = sizeof(struct rte_keepalive_shm);
+ struct rte_keepalive_shm *shm;
+
+ if (fd < 0)
+ printf("Failed to open %s as SHM:%s\n",
+ RTE_KEEPALIVE_SHM_NAME,
+ strerror(errno));
+ else {
+ shm = (struct rte_keepalive_shm *) mmap(
+ 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (shm == MAP_FAILED)
+ printf("Failed to mmap SHM:%s\n", strerror(errno));
+ else
+ return shm;
+ }
+
+ /* shm still holds MAP_FAILED, i.e. (void *)-1; clear it before failing */
+ shm = NULL;
+ return NULL;
+}
+
+int main(void)
+{
+ struct rte_keepalive_shm *shm = ka_shm_create();
+ struct timespec timeout = { .tv_nsec = 0 };
+ int idx_core;
+ int cnt_cores;
+ uint64_t last_seen_alive_time = 0;
+ uint64_t most_recent_alive_time;
+ int cnt_timeouts = 0;
+ int sem_errno;
+
+ if (shm == NULL) {
+ printf("Unable to access shared core state\n");
+ return 1;
+ }
+ while (1) {
+ most_recent_alive_time = 0;
+ for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_last_seen_times[idx_core] >
+ most_recent_alive_time)
+ most_recent_alive_time =
+ shm->core_last_seen_times[idx_core];
+
+ timeout.tv_sec = time(NULL) + SEM_TIMEOUT_SECS;
+ if (sem_timedwait(&shm->core_died, &timeout) == -1) {
+ /* Assume that no core-death signal combined with no change in any
+ * last-seen time means the keepalive monitor itself has failed.
+ */
+ sem_errno = errno;
+ if (sem_errno == ETIMEDOUT) {
+ if (last_seen_alive_time ==
+ most_recent_alive_time &&
+ cnt_timeouts++ >
+ MAX_TIMEOUTS) {
+ printf("No updates. Exiting..\n");
+ break;
+ }
+ } else
+ printf("sem_timedwait() error (%s)\n",
+ strerror(sem_errno));
+ last_seen_alive_time = most_recent_alive_time;
+ continue;
+ }
+ cnt_timeouts = 0;
+
+ cnt_cores = 0;
+ for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_state[idx_core] == RTE_KA_STATE_DEAD)
+ cnt_cores++;
+ if (cnt_cores == 0) {
+ /* This can happen if a core was restarted after the semaphore
+ * was posted while the agent was offline.
+ */
+ printf("Warning: Empty dead core report\n");
+ continue;
+ }
+
+ printf("%i dead cores: ", cnt_cores);
+ for (idx_core = 0;
+ idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_state[idx_core] == RTE_KA_STATE_DEAD)
+ printf("%d, ", idx_core);
+ printf("\b\b\n");
+ }
+ if (munmap(shm, sizeof(struct rte_keepalive_shm)) != 0)
+ printf("Warning: munmap() failed\n");
+ return 0;
+}
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 8da89aa1..84a59eb6 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -72,6 +72,8 @@
#include <rte_timer.h>
#include <rte_keepalive.h>
+#include "shm.h"
+
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
#define NB_MBUF 8192
@@ -523,7 +525,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
}
static void
-dead_core(__attribute__((unused)) void *ptr_data, const int id_core)
+dead_core(__rte_unused void *ptr_data, const int id_core)
{
printf("Dead core %i - restarting..\n", id_core);
if (rte_eal_get_lcore_state(id_core) == FINISHED) {
@@ -534,6 +536,14 @@ dead_core(__attribute__((unused)) void *ptr_data, const int id_core)
}
}
+static void
+relay_core_state(void *ptr_data, const int id_core,
+ const enum rte_keepalive_state core_state, uint64_t last_alive)
+{
+ rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
+ id_core, core_state, last_alive);
+}
+
int
main(int argc, char **argv)
{
@@ -570,9 +580,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
@@ -725,10 +732,18 @@ main(int argc, char **argv)
rte_timer_init(&stats_timer);
if (check_period > 0) {
+ struct rte_keepalive_shm *ka_shm;
+
+ ka_shm = rte_keepalive_shm_create();
+ if (ka_shm == NULL)
+ rte_exit(EXIT_FAILURE,
+ "rte_keepalive_shm_create() failed");
rte_global_keepalive_info =
- rte_keepalive_create(&dead_core, NULL);
+ rte_keepalive_create(&dead_core, ka_shm);
if (rte_global_keepalive_info == NULL)
rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
+ rte_keepalive_register_relay_callback(rte_global_keepalive_info,
+ relay_core_state, ka_shm);
rte_timer_init(&hb_timer);
if (rte_timer_reset(&hb_timer,
(check_period * rte_get_timer_hz()) / 1000,
diff --git a/examples/l2fwd-keepalive/shm.c b/examples/l2fwd-keepalive/shm.c
new file mode 100644
index 00000000..177aa5b8
--- /dev/null
+++ b/examples/l2fwd-keepalive/shm.c
@@ -0,0 +1,131 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_keepalive.h>
+
+#include "shm.h"
+
+struct rte_keepalive_shm *rte_keepalive_shm_create(void)
+{
+ int fd;
+ int idx_core;
+ struct rte_keepalive_shm *ka_shm;
+
+ /* If any existing object is not unlinked, it makes it all too easy
+ * for clients to end up with stale shared memory blocks when
+ * restarted. Unlinking makes sure subsequent shm_open by clients
+ * will get the new block mapped below.
+ */
+ if (shm_unlink(RTE_KEEPALIVE_SHM_NAME) == -1 && errno != ENOENT)
+ printf("Warning: Error unlinking stale %s (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME, strerror(errno));
+
+ fd = shm_open(RTE_KEEPALIVE_SHM_NAME,
+ O_CREAT | O_TRUNC | O_RDWR, 0666);
+ if (fd < 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to open %s as SHM (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME,
+ strerror(errno));
+ else if (ftruncate(fd, sizeof(struct rte_keepalive_shm)) != 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to resize SHM (%s)\n", strerror(errno));
+ else {
+ ka_shm = (struct rte_keepalive_shm *) mmap(
+ 0, sizeof(struct rte_keepalive_shm),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (ka_shm == MAP_FAILED)
+ RTE_LOG(INFO, EAL,
+ "Failed to mmap SHM (%s)\n", strerror(errno));
+ else {
+ memset(ka_shm, 0, sizeof(struct rte_keepalive_shm));
+
+ /* Initialize the semaphores for IPC/SHM use */
+ if (sem_init(&ka_shm->core_died, 1, 0) != 0) {
+ RTE_LOG(INFO, EAL,
+ "Failed to setup SHM semaphore (%s)\n",
+ strerror(errno));
+ munmap(ka_shm,
+ sizeof(struct rte_keepalive_shm));
+ return NULL;
+ }
+
+ /* Set all cores to 'not present' */
+ for (idx_core = 0;
+ idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++) {
+ ka_shm->core_state[idx_core] =
+ RTE_KA_STATE_UNUSED;
+ ka_shm->core_last_seen_times[idx_core] = 0;
+ }
+
+ return ka_shm;
+ }
+ }
+ return NULL;
+}
+
+void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
+ const int id_core, const enum rte_keepalive_state core_state,
+ uint64_t last_alive)
+{
+ int count;
+
+ shm->core_state[id_core] = core_state;
+ shm->core_last_seen_times[id_core] = last_alive;
+
+ if (core_state == RTE_KEEPALIVE_SHM_DEAD) {
+ /* Since core has died, also signal ka_agent.
+ *
+ * Limit number of times semaphore can be incremented, in case
+ * ka_agent is not active.
+ */
+ if (sem_getvalue(&shm->core_died, &count) == -1) {
+ RTE_LOG(INFO, EAL, "Semaphore check failed(%s)\n",
+ strerror(errno));
+ return;
+ }
+ if (count > 1)
+ return;
+
+ if (sem_post(&shm->core_died) != 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to increment semaphore (%s)\n",
+ strerror(errno));
+ }
+}
diff --git a/examples/l2fwd-keepalive/shm.h b/examples/l2fwd-keepalive/shm.h
new file mode 100644
index 00000000..25e1b61d
--- /dev/null
+++ b/examples/l2fwd-keepalive/shm.h
@@ -0,0 +1,89 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define RTE_KEEPALIVE_SHM_NAME "/dpdk_keepalive_shm_name"
+
+#define RTE_KEEPALIVE_SHM_ALIVE 1
+#define RTE_KEEPALIVE_SHM_DEAD 2
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <semaphore.h>
+#include <rte_keepalive.h>
+
+/**
+ * Keepalive SHM structure.
+ *
+ * The shared memory allocated by the primary is this size and holds the
+ * information laid out in this struct. A secondary may open the SHM and
+ * read its contents.
+ */
+struct rte_keepalive_shm {
+ /** IPC semaphore. Posted when a core dies */
+ sem_t core_died;
+
+ /**
+ * Relayed status of each core.
+ */
+ enum rte_keepalive_state core_state[RTE_KEEPALIVE_MAXCORES];
+
+ /**
+ * Last-seen-alive timestamps for the cores
+ */
+ uint64_t core_last_seen_times[RTE_KEEPALIVE_MAXCORES];
+};
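+
+/*
+ * Note (informational): core_died is initialized by the primary with
+ * sem_init(&sem, 1, 0) -- pshared set -- inside the shared mapping, which
+ * is what lets a separate process such as ka-agent sem_timedwait() on it
+ * through its own mapping of this SHM.
+ */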
+
+/**
+ * Create shared host memory keepalive object.
+ * @return
+ * Pointer to SHM keepalive structure, or NULL on failure.
+ */
+struct rte_keepalive_shm *rte_keepalive_shm_create(void);
+
+/**
+ * Relays the state of a given core.
+ * @param *shm
+ * Pointer to SHM keepalive structure.
+ * @param id_core
+ * ID of the core.
+ * @param core_state
+ * State of the core.
+ * @param last_alive
+ * Last-seen-alive timestamp for the core.
+ */
+void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
+ const int id_core, const enum rte_keepalive_state core_state,
+ uint64_t last_alive);
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 1ad94887..88979216 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -80,6 +80,7 @@ static volatile bool force_quit;
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define MEMPOOL_CACHE_SIZE 256
/*
* Configurable number of RX/TX ring descriptors
@@ -134,10 +135,9 @@ struct l2fwd_port_statistics {
} __rte_cache_aligned;
struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
-/* A tsc-based timer responsible for triggering statistics printout */
-#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
-static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
+/* A tsc-based timer responsible for triggering statistics printout */
+static uint64_t timer_period = 10; /* default period is 10 seconds */
/* Print out statistics on packets dropped */
static void
@@ -274,7 +274,7 @@ l2fwd_main_loop(void)
timer_tsc += diff_tsc;
/* if timer has reached its timeout */
- if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
+ if (unlikely(timer_tsc >= timer_period)) {
/* do this only on master core */
if (lcore_id == rte_get_master_lcore()) {
@@ -381,7 +381,7 @@ l2fwd_parse_timer_period(const char *q_arg)
static int
l2fwd_parse_args(int argc, char **argv)
{
- int opt, ret;
+ int opt, ret, timer_secs;
char **argvopt;
int option_index;
char *prgname = argv[0];
@@ -417,12 +417,13 @@ l2fwd_parse_args(int argc, char **argv)
/* timer period */
case 'T':
- timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
- if (timer_period < 0) {
+ timer_secs = l2fwd_parse_timer_period(optarg);
+ if (timer_secs < 0) {
printf("invalid timer period\n");
l2fwd_usage(prgname);
return -1;
}
+ timer_period = timer_secs;
break;
/* long options */
@@ -541,9 +542,13 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+ /* convert to number of cycles */
+ timer_period *= rte_get_timer_hz();
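+ /* e.g. with a 2.5 GHz TSC, the default 10 s becomes 25e9 cycles */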
+
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+ MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
@@ -551,9 +556,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 26d9f5eb..16f6110e 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -72,6 +72,9 @@
#include <rte_string_fns.h>
#include <rte_acl.h>
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define L3FWDACL_DEBUG
+#endif
#define DO_RFC_1812_CHECKS
#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
@@ -1914,8 +1917,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index cb42bfb9..f746960e 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1572,10 +1572,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
-
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index 034c22a7..ca01b112 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -982,8 +982,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index fc59243d..def5a024 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -259,6 +259,8 @@ em_mask_key(void *key, xmm_t mask)
return vandq_s32(data, mask);
}
+#else
+#error No vector engine (SSE, NEON) available, check your toolchain
#endif
static inline uint8_t
diff --git a/examples/l3fwd/l3fwd_em_hlm_sse.h b/examples/l3fwd/l3fwd_em_hlm_sse.h
index 5001c724..7714a20c 100644
--- a/examples/l3fwd/l3fwd_em_hlm_sse.h
+++ b/examples/l3fwd/l3fwd_em_hlm_sse.h
@@ -81,7 +81,7 @@ em_get_dst_port_ipv4x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
- rte_hash_lookup_multi(qconf->ipv4_lookup_struct, &key_array[0], 8, ret);
+ rte_hash_lookup_bulk(qconf->ipv4_lookup_struct, &key_array[0], 8, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ?
portid : ipv4_l3fwd_out_if[ret[0]]);
@@ -179,7 +179,7 @@ em_get_dst_port_ipv6x8(struct lcore_conf *qconf, struct rte_mbuf *m[8],
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
- rte_hash_lookup_multi(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
+ rte_hash_lookup_bulk(qconf->ipv6_lookup_struct, &key_array[0], 8, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ?
portid : ipv6_l3fwd_out_if[ret[0]]);
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index bf6d8856..7a79cd2c 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -868,8 +868,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 99815989..04dc3e40 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -580,9 +580,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_panic("No Ethernet port - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/*
* Each logical core is assigned a dedicated TX queue on each port.
*/
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
index 2dc8b829..2d951d93 100644
--- a/examples/multi_process/l2fwd_fork/main.c
+++ b/examples/multi_process/l2fwd_fork/main.c
@@ -442,7 +442,8 @@ reset_slave_all_ports(unsigned slaveid)
pool = rte_mempool_lookup(buf_name);
if (pool)
printf("Port %d mempool free object is %u(%u)\n", slave->port[i],
- rte_mempool_count(pool), (unsigned)NB_MBUF);
+ rte_mempool_avail_count(pool),
+ (unsigned int)NB_MBUF);
else
printf("Can't find mempool %s\n", buf_name);
@@ -979,9 +980,6 @@ main(int argc, char **argv)
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
-
/* create the mbuf pool */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
diff --git a/examples/netmap_compat/lib/compat_netmap.c b/examples/netmap_compat/lib/compat_netmap.c
index bf1b418a..112c551f 100644
--- a/examples/netmap_compat/lib/compat_netmap.c
+++ b/examples/netmap_compat/lib/compat_netmap.c
@@ -865,6 +865,9 @@ rte_netmap_poll(struct pollfd *fds, nfds_t nfds, int timeout)
uint32_t i, idx, port;
uint32_t want_rx, want_tx;
+ if (timeout > 0)
+ return -1;
+
ret = 0;
do {
for (i = 0; i < nfds; i++) {
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 15bb900c..3c88b86e 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -55,17 +55,6 @@
#define RING_SIZE 16384
-/* uncomment below line to enable debug logs */
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1
@@ -240,7 +229,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
/* free the mbufs which failed from transmit */
app_stats.tx.ro_tx_failed_pkts += count;
- LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
+ RTE_LOG(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
pktmbuf_free_bulk(unsent, count);
}
@@ -421,7 +410,7 @@ rx_thread(struct rte_ring *ring_out)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
pkts, MAX_PKTS_BURST);
if (nb_rx_pkts == 0) {
- LOG_DEBUG(REORDERAPP,
+ RTE_LOG(DEBUG, REORDERAPP,
"%s():Received zero packets\n", __func__);
continue;
}
@@ -533,7 +522,8 @@ send_thread(struct send_thread_args *args)
if (ret == -1 && rte_errno == ERANGE) {
/* Too early pkts should be transmitted out directly */
- LOG_DEBUG(REORDERAPP, "%s():Cannot reorder early packet "
+ RTE_LOG(DEBUG, REORDERAPP,
+ "%s():Cannot reorder early packet "
"direct enqueuing to TX\n", __func__);
outp = mbufs[i]->port;
if ((portmask & (1 << outp)) == 0) {
diff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c
index 8fbff737..062275a4 100644
--- a/examples/performance-thread/common/lthread.c
+++ b/examples/performance-thread/common/lthread.c
@@ -143,7 +143,7 @@ struct lthread_stack *_stack_alloc(void)
struct lthread_stack *s;
s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
- LTHREAD_ASSERT(s != NULL);
+ RTE_ASSERT(s != NULL);
s->root_sched = THIS_SCHED;
s->stack_size = LTHREAD_MAX_STACK_SIZE;
diff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h
index c8357f4a..b858b55e 100644
--- a/examples/performance-thread/common/lthread_int.h
+++ b/examples/performance-thread/common/lthread_int.h
@@ -197,16 +197,4 @@ struct lthread {
uint64_t diag_ref; /* ref to user diag data */
} __rte_cache_aligned;
-/*
- * Assert
- */
-#if LTHREAD_DIAG
-#define LTHREAD_ASSERT(expr) do { \
- if (!(expr)) \
- rte_panic("line%d\tassert \"" #expr "\" failed\n", __LINE__);\
-} while (0)
-#else
-#define LTHREAD_ASSERT(expr) do {} while (0)
-#endif
-
#endif /* LTHREAD_INT_H */
diff --git a/examples/performance-thread/common/lthread_mutex.c b/examples/performance-thread/common/lthread_mutex.c
index af8b82d2..c1bc6271 100644
--- a/examples/performance-thread/common/lthread_mutex.c
+++ b/examples/performance-thread/common/lthread_mutex.c
@@ -170,7 +170,6 @@ int lthread_mutex_lock(struct lthread_mutex *m)
_suspend();
/* resumed, must loop and compete for the lock again */
}
- LTHREAD_ASSERT(0);
return 0;
}
@@ -231,7 +230,7 @@ int lthread_mutex_unlock(struct lthread_mutex *m)
if (unblocked != NULL) {
rte_atomic64_dec(&m->count);
DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
- LTHREAD_ASSERT(unblocked->sched != NULL);
+ RTE_ASSERT(unblocked->sched != NULL);
_ready_queue_insert((struct lthread_sched *)
unblocked->sched, unblocked);
break;
diff --git a/examples/performance-thread/common/lthread_pool.h b/examples/performance-thread/common/lthread_pool.h
index a5f32515..27680eab 100644
--- a/examples/performance-thread/common/lthread_pool.h
+++ b/examples/performance-thread/common/lthread_pool.h
@@ -138,14 +138,14 @@ _qnode_pool_create(const char *name, int prealloc_size) {
RTE_CACHE_LINE_SIZE,
rte_socket_id());
- LTHREAD_ASSERT(p);
+ RTE_ASSERT(p);
p->stub = rte_malloc_socket(NULL,
sizeof(struct qnode),
RTE_CACHE_LINE_SIZE,
rte_socket_id());
- LTHREAD_ASSERT(p->stub);
+ RTE_ASSERT(p->stub);
if (name != NULL)
strncpy(p->name, name, LT_MAX_NAME_SIZE);
diff --git a/examples/performance-thread/common/lthread_queue.h b/examples/performance-thread/common/lthread_queue.h
index 0c395167..2c55fcec 100644
--- a/examples/performance-thread/common/lthread_queue.h
+++ b/examples/performance-thread/common/lthread_queue.h
@@ -129,7 +129,7 @@ _lthread_queue_create(const char *name)
/* allocated stub node */
stub = _qnode_alloc();
- LTHREAD_ASSERT(stub);
+ RTE_ASSERT(stub);
if (name != NULL)
strncpy(new_queue->name, name, sizeof(new_queue->name));
diff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c
index 7c40bc05..c64c21ff 100644
--- a/examples/performance-thread/common/lthread_sched.c
+++ b/examples/performance-thread/common/lthread_sched.c
@@ -268,7 +268,7 @@ struct lthread_sched *_lthread_sched_create(size_t stack_size)
struct lthread_sched *new_sched;
unsigned lcoreid = rte_lcore_id();
- LTHREAD_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
+ RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
if (stack_size == 0)
stack_size = LTHREAD_MAX_STACK_SIZE;
diff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c
index 43cda4ff..6876f831 100644
--- a/examples/performance-thread/common/lthread_tls.c
+++ b/examples/performance-thread/common/lthread_tls.c
@@ -94,7 +94,7 @@ void _lthread_key_pool_init(void)
pool = rte_ring_create(name,
LTHREAD_MAX_KEYS, 0, 0);
- LTHREAD_ASSERT(pool);
+ RTE_ASSERT(pool);
int i;
@@ -240,7 +240,7 @@ void _lthread_tls_alloc(struct lthread *lt)
tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);
- LTHREAD_ASSERT(tls != NULL);
+ RTE_ASSERT(tls != NULL);
tls->root_sched = (THIS_SCHED);
lt->tls = tls;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 15c0a4de..fdc90b28 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -996,7 +996,7 @@ simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
- rte_hash_lookup_multi(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
+ rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
&key_array[0], 8, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
@@ -1150,7 +1150,7 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
const void *key_array[8] = {&key[0], &key[1], &key[2], &key[3],
&key[4], &key[5], &key[6], &key[7]};
- rte_hash_lookup_multi(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
+ rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
&key_array[0], 4, ret);
dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
@@ -1307,7 +1307,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
* to BAD_PORT value.
*/
static inline __attribute__((always_inline)) void
-rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint32_t *dp, uint32_t ptype)
+rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
{
uint8_t ihl;
@@ -1326,7 +1326,7 @@ rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint32_t *dp, uint32_t ptype)
}
#else
-#define rfc1812_process(mb, dp) do { } while (0)
+#define rfc1812_process(mb, dp, ptype) do { } while (0)
#endif /* DO_RFC_1812_CHECKS */
#endif /* APP_LOOKUP_LPM && ENABLE_MULTI_BUFFER_OPTIMIZE */
@@ -1343,28 +1343,27 @@ get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
struct ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
- if (rte_lpm_lookup(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
- dst_ipv4, &next_hop_ipv4) != 0) {
- next_hop_ipv4 = portid;
- return next_hop_ipv4;
- }
+ return (uint16_t) ((rte_lpm_lookup(
+ RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct, dst_ipv4,
+ &next_hop_ipv4) == 0) ? next_hop_ipv4 : portid);
+
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
+
eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
- if (rte_lpm6_lookup(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop_ipv6) != 0) {
- next_hop_ipv6 = portid;
- return next_hop_ipv6;
- }
- } else {
- next_hop_ipv4 = portid;
- return next_hop_ipv4;
+
+ return (uint16_t) ((rte_lpm6_lookup(
+ RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
+ ipv6_hdr->dst_addr, &next_hop_ipv6) == 0) ? next_hop_ipv6 :
+ portid);
+
}
+ return portid;
}
static inline void
-process_packet(struct rte_mbuf *pkt, uint32_t *dst_port, uint8_t portid)
+process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint8_t portid)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
@@ -1431,9 +1430,9 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
static inline void
processx4_step2(__m128i dip,
uint32_t ipv4_flag,
- uint32_t portid,
+ uint8_t portid,
struct rte_mbuf *pkt[FWDSTEP],
- uint32_t dprt[FWDSTEP])
+ uint16_t dprt[FWDSTEP])
{
rte_xmm_t dst;
const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
@@ -1445,7 +1444,11 @@ processx4_step2(__m128i dip,
/* if all 4 packets are IPV4. */
if (likely(ipv4_flag)) {
rte_lpm_lookupx4(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct, dip,
- dprt, portid);
+ dst.u32, portid);
+
+ /* get rid of unused upper 16 bit for each dport. */
+ dst.x = _mm_packs_epi32(dst.x, dst.x);
+ *(uint64_t *)dprt = dst.u64[0];
} else {
dst.x = dip;
dprt[0] = get_dst_port(pkt[0], dst.u32[0], portid);
@@ -1460,7 +1463,7 @@ processx4_step2(__m128i dip,
* Perform RFC1812 checks and updates for IPV4 packets.
*/
static inline void
-processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint32_t dst_port[FWDSTEP])
+processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
{
__m128i te[FWDSTEP];
__m128i ve[FWDSTEP];
@@ -1658,9 +1661,9 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
/* if dest port value has changed. */
if (v != GRPMSK) {
- lp = pnum->u16 + gptbl[v].idx;
- lp[0] = 1;
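+ /* Write the whole group-count word before re-pointing lp: the
+ * previous order let this store clobber the lp[0] = 1 seed. */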
pnum->u64 = gptbl[v].pnum;
+ pnum->u16[FWDSTEP] = 1;
+ lp = pnum->u16 + gptbl[v].idx;
}
return lp;
@@ -1679,7 +1682,7 @@ process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
int32_t k;
uint16_t dlp;
uint16_t *lp;
- uint32_t dst_port[MAX_PKT_BURST];
+ uint16_t dst_port[MAX_PKT_BURST];
__m128i dip[MAX_PKT_BURST / FWDSTEP];
uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
uint16_t pnum[MAX_PKT_BURST + 1];
@@ -3482,8 +3485,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "init_rx_rings failed\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
if (check_port_config(nb_ports) < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index b968b001..15656155 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -133,14 +133,20 @@ struct rte_meter_trtcm_params app_trtcm_params[] = {
FLOW_METER app_flows[APP_FLOWS_MAX];
-static void
+static int
app_configure_flow_table(void)
{
uint32_t i, j;
+ int ret;
- for (i = 0, j = 0; i < APP_FLOWS_MAX; i ++, j = (j + 1) % RTE_DIM(PARAMS)){
- FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
+ for (i = 0, j = 0; i < APP_FLOWS_MAX;
+ i ++, j = (j + 1) % RTE_DIM(PARAMS)) {
+ ret = FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static inline void
@@ -381,7 +387,9 @@ main(int argc, char **argv)
rte_eth_promiscuous_enable(port_tx);
/* App configuration */
- app_configure_flow_table();
+ ret = app_configure_flow_table();
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid configure flow table\n");
/* Launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
diff --git a/examples/qos_meter/main.h b/examples/qos_meter/main.h
index 530bf69c..54867dcf 100644
--- a/examples/qos_meter/main.h
+++ b/examples/qos_meter/main.h
@@ -51,7 +51,7 @@ enum policer_action policer_table[e_RTE_METER_COLORS][e_RTE_METER_COLORS] =
#if APP_MODE == APP_MODE_FWD
#define FUNC_METER(a,b,c,d) color, flow_id=flow_id, pkt_len=pkt_len, time=time
-#define FUNC_CONFIG(a,b)
+#define FUNC_CONFIG(a, b) 0
#define PARAMS app_srtcm_params
#define FLOW_METER int
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index 3e7fd087..476a0ee1 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -123,7 +123,7 @@ app_eal_core_mask(void)
uint64_t cm = 0;
struct rte_config *cfg = rte_eal_get_configuration();
- for (i = 0; i < RTE_MAX_LCORE; i++) {
+ for (i = 0; i < APP_MAX_LCORE; i++) {
if (cfg->lcore_role[i] == ROLE_RTE)
cm |= (1ULL << i);
}
@@ -142,7 +142,7 @@ app_cpu_core_count(void)
char path[PATH_MAX];
uint32_t ncores = 0;
- for(i = 0; i < RTE_MAX_LCORE; i++) {
+ for (i = 0; i < APP_MAX_LCORE; i++) {
len = snprintf(path, sizeof(path), SYS_CPU_DIR, i);
if (len <= 0 || (unsigned)len >= sizeof(path))
continue;
@@ -162,7 +162,7 @@ static int
app_parse_opt_vals(const char *conf_str, char separator, uint32_t n_vals, uint32_t *opt_vals)
{
char *string;
- uint32_t i, n_tokens;
+ int i, n_tokens;
char *tokens[MAX_OPT_VALUES];
if (conf_str == NULL || opt_vals == NULL || n_vals == 0 || n_vals > MAX_OPT_VALUES)
@@ -175,9 +175,11 @@ app_parse_opt_vals(const char *conf_str, char separator, uint32_t n_vals, uint32
n_tokens = rte_strsplit(string, strnlen(string, 32), tokens, n_vals, separator);
- for(i = 0; i < n_tokens; i++) {
+ if (n_tokens > MAX_OPT_VALUES)
+ return -1;
+
+ for (i = 0; i < n_tokens; i++)
opt_vals[i] = (uint32_t)atol(tokens[i]);
- }
free(string);
@@ -270,7 +272,7 @@ app_parse_flow_conf(const char *conf_str)
}
if (pconf->tx_port >= RTE_MAX_ETHPORTS) {
RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu8" index\n",
- nb_pfc, pconf->rx_port);
+ nb_pfc, pconf->tx_port);
return -1;
}
diff --git a/examples/qos_sched/main.c b/examples/qos_sched/main.c
index e16b164d..e10cfd44 100644
--- a/examples/qos_sched/main.c
+++ b/examples/qos_sched/main.c
@@ -201,8 +201,6 @@ app_stat(void)
stats.oerrors - tx_stats[i].oerrors);
memcpy(&tx_stats[i], &stats, sizeof(stats));
- //printf("MP = %d\n", rte_mempool_count(conf->app_pktmbuf_pool));
-
#if APP_COLLECT_STAT
printf("-------+------------+------------+\n");
printf(" | received | dropped |\n");
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index 82aa0fae..c7490c61 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -68,7 +68,10 @@ extern "C" {
#define BURST_TX_DRAIN_US 100
-#define MAX_DATA_STREAMS (RTE_MAX_LCORE/2)
+#ifndef APP_MAX_LCORE
+#define APP_MAX_LCORE 64
+#endif
+#define MAX_DATA_STREAMS (APP_MAX_LCORE/2)
#define MAX_SCHED_SUBPORTS 8
#define MAX_SCHED_PIPES 4096
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index afc13665..c2087218 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -170,5 +170,5 @@ setup_shared_variables(void)
rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
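+ /* (unsigned int *) arithmetic steps in sizeof(int)-sized units, so
+ * "+ 1" is the second int of the memzone; the old "+ sizeof(int)"
+ * overshot by skipping four ints. */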
}
diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c
index eb2f618a..4961089b 100644
--- a/examples/quota_watermark/qwctl/qwctl.c
+++ b/examples/quota_watermark/qwctl/qwctl.c
@@ -68,7 +68,7 @@ setup_shared_variables(void)
rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");
quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
}
int main(int argc, char **argv)
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index f97d552a..622f248a 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -566,10 +566,9 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
struct rte_mbuf **m_table;
unsigned len, ret = 0;
const uint16_t lcore_id = rte_lcore_id();
- struct virtio_net *dev = vdev->dev;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n",
- dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
+ vdev->vid);
/* Add packet to the port tx queue */
tx_q = &lcore_tx_queue[lcore_id];
@@ -578,8 +577,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
tx_q->m_table[len] = m;
len++;
if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
+ dev_statistics[vdev->vid].tx_total++;
+ dev_statistics[vdev->vid].tx++;
}
if (unlikely(len == MAX_PKT_BURST)) {
@@ -614,7 +613,6 @@ static int
switch_worker(__rte_unused void *arg)
{
struct rte_mempool *mbuf_pool = arg;
- struct virtio_net *dev = NULL;
struct vhost_dev *vdev = NULL;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct virtio_net_data_ll *dev_ll;
@@ -651,7 +649,7 @@ switch_worker(__rte_unused void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after "
+ RTE_LOG(DEBUG, VHOST_DATA, "TX queue drained after "
"timeout with burst size %u\n",
tx_q->len);
ret = overlay_options.tx_handle(ports[0],
@@ -688,7 +686,6 @@ switch_worker(__rte_unused void *arg)
while (dev_ll != NULL) {
vdev = dev_ll->vdev;
- dev = vdev->dev;
if (unlikely(vdev->remove)) {
dev_ll = dev_ll->next;
@@ -709,22 +706,22 @@ switch_worker(__rte_unused void *arg)
* must be less than virtio queue size
*/
if (enable_retry && unlikely(rx_count >
- rte_vring_available_entries(dev, VIRTIO_RXQ))) {
+ rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))) {
for (retry = 0; retry < burst_rx_retry_num;
retry++) {
rte_delay_us(burst_rx_delay_time);
- if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
+ if (rx_count <= rte_vhost_avail_entries(vdev->vid, VIRTIO_RXQ))
break;
}
}
- ret_count = overlay_options.rx_handle(dev, pkts_burst, rx_count);
+ ret_count = overlay_options.rx_handle(vdev->vid, pkts_burst, rx_count);
if (enable_stats) {
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_total_atomic,
+ &dev_statistics[vdev->vid].rx_total_atomic,
rx_count);
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_atomic, ret_count);
+ &dev_statistics[vdev->vid].rx_atomic, ret_count);
}
while (likely(rx_count)) {
rx_count--;
@@ -736,7 +733,7 @@ switch_worker(__rte_unused void *arg)
if (likely(!vdev->remove)) {
/* Handle guest TX*/
- tx_count = rte_vhost_dequeue_burst(dev,
+ tx_count = rte_vhost_dequeue_burst(vdev->vid,
VIRTIO_TXQ, mbuf_pool,
pkts_burst, MAX_PKT_BURST);
/* If this is the first received packet we need to learn the MAC */
@@ -908,23 +905,27 @@ init_data_ll(void)
/**
* Remove a device from the specific data core linked list and
 * from the main linked list. Synchronization occurs through the use
- * of the lcore dev_removal_flag. Device is made volatile here
- * to avoid re-ordering of dev->remove=1 which can cause an infinite
- * loop in the rte_pause loop.
+ * of the lcore dev_removal_flag.
*/
static void
-destroy_device(volatile struct virtio_net *dev)
+destroy_device(int vid)
{
struct virtio_net_data_ll *ll_lcore_dev_cur;
struct virtio_net_data_ll *ll_main_dev_cur;
struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
struct virtio_net_data_ll *ll_main_dev_last = NULL;
- struct vhost_dev *vdev;
+ struct vhost_dev *vdev = NULL;
int lcore;
- dev->flags &= ~VIRTIO_DEV_RUNNING;
-
- vdev = (struct vhost_dev *)dev->priv;
+ ll_main_dev_cur = ll_root_used;
+ while (ll_main_dev_cur != NULL) {
+ if (ll_main_dev_cur->vdev->vid == vid) {
+ vdev = ll_main_dev_cur->vdev;
+ break;
+ }
+ /* Advance to the next entry so the walk terminates. */
+ ll_main_dev_cur = ll_main_dev_cur->next;
+ }
+ if (!vdev)
+ return;
/* set the remove flag. */
vdev->remove = 1;
@@ -944,8 +945,7 @@ destroy_device(volatile struct virtio_net *dev)
if (ll_lcore_dev_cur == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find the dev to be destroy.\n",
- dev->device_fh);
+ "(%d) Failed to find the dev to be destroy.\n", vid);
return;
}
@@ -992,8 +992,8 @@ destroy_device(volatile struct virtio_net *dev)
/* Decrement number of device on the lcore. */
lcore_info[vdev->coreid].lcore_ll->device_num--;
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed "
- "from data core\n", dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been removed "
+ "from data core\n", vid);
rte_free(vdev);
@@ -1004,7 +1004,7 @@ destroy_device(volatile struct virtio_net *dev)
* to the main linked list and the allocated to a specific data core.
*/
static int
-new_device(struct virtio_net *dev)
+new_device(int vid)
{
struct virtio_net_data_ll *ll_dev;
int lcore, core_add = 0;
@@ -1014,18 +1014,16 @@ new_device(struct virtio_net *dev)
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
- dev->device_fh);
+ "(%d) Couldn't allocate memory for vhost dev\n", vid);
return -1;
}
- vdev->dev = dev;
- dev->priv = vdev;
+ vdev->vid = vid;
/* Add device to main ll */
ll_dev = get_data_ll_free_entry(&ll_root_free);
if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in"
+ RTE_LOG(INFO, VHOST_DATA, "(%d) No free entry found in"
" linked list Device limit of %d devices per core"
- " has been reached\n", dev->device_fh, nb_devices);
+ " has been reached\n", vid, nb_devices);
if (vdev->regions_hpa)
rte_free(vdev->regions_hpa);
rte_free(vdev);
@@ -1033,7 +1031,7 @@ new_device(struct virtio_net *dev)
}
ll_dev->vdev = vdev;
add_data_ll_entry(&ll_root_used, ll_dev);
- vdev->rx_q = dev->device_fh;
+ vdev->rx_q = vid;
/* reset ready flag */
vdev->ready = DEVICE_MAC_LEARNING;
@@ -1050,10 +1048,10 @@ new_device(struct virtio_net *dev)
ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
if (ll_dev == NULL) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") Failed to add device to data core\n",
- dev->device_fh);
+ "(%d) Failed to add device to data core\n",
+ vid);
vdev->ready = DEVICE_SAFE_REMOVE;
- destroy_device(dev);
+ destroy_device(vid);
rte_free(vdev->regions_hpa);
rte_free(vdev);
return -1;
@@ -1065,17 +1063,16 @@ new_device(struct virtio_net *dev)
ll_dev);
/* Initialize device stats */
- memset(&dev_statistics[dev->device_fh], 0,
+ memset(&dev_statistics[vid], 0,
sizeof(struct device_statistics));
/* Disable notifications. */
- rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
- rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
lcore_info[vdev->coreid].lcore_ll->device_num++;
- dev->flags |= VIRTIO_DEV_RUNNING;
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n",
- dev->device_fh, vdev->coreid);
+ RTE_LOG(INFO, VHOST_DATA, "(%d) Device has been added to data core %d\n",
+ vid, vdev->coreid);
return 0;
}
@@ -1099,7 +1096,7 @@ print_stats(void)
struct virtio_net_data_ll *dev_ll;
uint64_t tx_dropped, rx_dropped;
uint64_t tx, tx_total, rx, rx_total, rx_ip_csum, rx_l4_csum;
- uint32_t device_fh;
+ int vid;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
@@ -1113,22 +1110,22 @@ print_stats(void)
dev_ll = ll_root_used;
while (dev_ll != NULL) {
- device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
- tx_total = dev_statistics[device_fh].tx_total;
- tx = dev_statistics[device_fh].tx;
+ vid = dev_ll->vdev->vid;
+ tx_total = dev_statistics[vid].tx_total;
+ tx = dev_statistics[vid].tx;
tx_dropped = tx_total - tx;
rx_total = rte_atomic64_read(
- &dev_statistics[device_fh].rx_total_atomic);
+ &dev_statistics[vid].rx_total_atomic);
rx = rte_atomic64_read(
- &dev_statistics[device_fh].rx_atomic);
+ &dev_statistics[vid].rx_atomic);
rx_dropped = rx_total - rx;
rx_ip_csum = rte_atomic64_read(
- &dev_statistics[device_fh].rx_bad_ip_csum);
+ &dev_statistics[vid].rx_bad_ip_csum);
rx_l4_csum = rte_atomic64_read(
- &dev_statistics[device_fh].rx_bad_l4_csum);
+ &dev_statistics[vid].rx_bad_l4_csum);
- printf("\nStatistics for device %"PRIu32" ----------"
+ printf("\nStatistics for device %d ----------"
"\nTX total: %"PRIu64""
"\nTX dropped: %"PRIu64""
"\nTX successful: %"PRIu64""
@@ -1137,7 +1134,7 @@ print_stats(void)
"\nRX bad L4 csum: %"PRIu64""
"\nRX dropped: %"PRIu64""
"\nRX successful: %"PRIu64"",
- device_fh,
+ vid,
tx_total,
tx_dropped,
tx,
@@ -1190,8 +1187,6 @@ main(int argc, char *argv[])
/* Get the number of physical ports. */
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NB_PORTS and global array PORTS
@@ -1220,9 +1215,6 @@ main(int argc, char *argv[])
for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
vpool_array[queue_id].pool = mbuf_pool;
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
-
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
@@ -1251,7 +1243,7 @@ main(int argc, char *argv[])
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
ret = rte_thread_setname(tid, thread_name);
if (ret != 0)
- RTE_LOG(ERR, VHOST_CONFIG, "Cannot set print-stats name\n");
+ RTE_LOG(DEBUG, VHOST_CONFIG, "Cannot set print-stats name\n");
}
/* Launch all data cores. */
@@ -1262,7 +1254,7 @@ main(int argc, char *argv[])
rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
/* Register CUSE device to handle IOCTLs. */
- ret = rte_vhost_driver_register((char *)&dev_basename);
+ ret = rte_vhost_driver_register((char *)&dev_basename, 0);
if (ret != 0)
rte_exit(EXIT_FAILURE, "CUSE device setup failure.\n");
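The tep_termination hunks above follow the vhost library's move from struct virtio_net pointers to plain integer vid handles, including the extra flags argument to rte_vhost_driver_register(). For orientation, a sketch of how the vid-based callbacks are typically wired up against this release's API (the start_vhost() wrapper is illustrative, not a quote from the example):

    static const struct virtio_net_device_ops virtio_net_device_ops = {
        .new_device     = new_device,     /* int  new_device(int vid)     */
        .destroy_device = destroy_device, /* void destroy_device(int vid) */
    };

    static void
    start_vhost(const char *path)
    {
        /* flags == 0 keeps the pre-existing (server) behaviour. */
        rte_vhost_driver_register(path, 0);
        rte_vhost_driver_callback_register(&virtio_net_device_ops);
        rte_vhost_driver_session_start(); /* blocks, servicing vhost msgs */
    }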
diff --git a/examples/tep_termination/main.h b/examples/tep_termination/main.h
index a34301ad..c0ea7667 100644
--- a/examples/tep_termination/main.h
+++ b/examples/tep_termination/main.h
@@ -36,14 +36,6 @@
#include <rte_ether.h>
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
@@ -79,8 +71,7 @@ struct device_statistics {
* Device linked list structure for data path.
*/
struct vhost_dev {
- /**< Pointer to device created by vhost lib. */
- struct virtio_net *dev;
+ int vid;
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
/**< Memory region information for gpa to hpa translation. */
@@ -124,6 +115,6 @@ struct virtio_net_data_ll {
};
uint32_t
-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count);
+virtio_dev_rx(int vid, struct rte_mbuf **pkts, uint32_t count);
#endif /* _MAIN_H_ */
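With the compile-time LOG_DEBUG wrapper removed above, debug output goes through the ordinary RTE_LOG path and is gated at run time. A minimal sketch using this release's log API (note RTE_LOG still honours the build-time CONFIG_RTE_LOG_LEVEL, which must permit DEBUG):

    #include <rte_log.h>

    #define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2

    static void
    enable_debug_logs(void)
    {
        /* Raise the global level so RTE_LOG(DEBUG, ...) is emitted. */
        rte_set_log_level(RTE_LOG_DEBUG);
        RTE_LOG(DEBUG, VHOST_DATA, "debug logging enabled\n");
    }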
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 2a48e142..37575c27 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -244,17 +244,16 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
{
int i, ret;
struct ether_hdr *pkt_hdr;
- struct virtio_net *dev = vdev->dev;
- uint64_t portid = dev->device_fh;
+ uint64_t portid = vdev->vid;
struct ipv4_hdr *ip;
struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
if (unlikely(portid > VXLAN_N_PORTS)) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") WARNING: Not configuring device,"
+ "(%d) WARNING: Not configuring device,"
"as already have %d ports for VXLAN.",
- dev->device_fh, VXLAN_N_PORTS);
+ vdev->vid, VXLAN_N_PORTS);
return -1;
}
@@ -262,9 +261,9 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") WARNING: This device is using an existing"
+ "(%d) WARNING: This device is using an existing"
" MAC address and has not been registered.\n",
- dev->device_fh);
+ vdev->vid);
return -1;
}
@@ -425,8 +424,7 @@ vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
/* Check for decapsulation and pass packets directly to VIRTIO device */
int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
- uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
{
uint32_t i = 0;
uint32_t count = 0;
@@ -436,11 +434,11 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
for (i = 0; i < rx_count; i++) {
if (enable_stats) {
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ &dev_statistics[vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
!= 0);
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ &dev_statistics[vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
!= 0);
}
@@ -452,6 +450,6 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
count++;
}
- ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+ ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
return ret;
}
diff --git a/examples/tep_termination/vxlan_setup.h b/examples/tep_termination/vxlan_setup.h
index 1846540f..8d264619 100644
--- a/examples/tep_termination/vxlan_setup.h
+++ b/examples/tep_termination/vxlan_setup.h
@@ -55,10 +55,10 @@ typedef void (*ol_tunnel_destroy_t)(struct vhost_dev *vdev);
typedef int (*ol_tx_handle_t)(uint8_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
-typedef int (*ol_rx_handle_t)(struct virtio_net *dev, struct rte_mbuf **pkts,
+typedef int (*ol_rx_handle_t)(int vid, struct rte_mbuf **pkts,
uint32_t count);
-typedef int (*ol_param_handle)(struct virtio_net *dev);
+typedef int (*ol_param_handle)(int vid);
struct ol_switch_ops {
ol_port_configure_t port_configure;
@@ -82,6 +82,6 @@ int
vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count);
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts, uint32_t count);
#endif /* VXLAN_SETUP_H_ */
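The vid-based typedefs above feed the overlay_options dispatch table used elsewhere in the example (see the tx_handle/rx_handle calls in main.c). A hypothetical initializer showing how the handlers slot in; vxlan_port_init is an assumed name for the port-configure handler:

    static struct ol_switch_ops vxlan_switch_ops = {
        .port_configure = vxlan_port_init, /* assumed handler name */
        .tx_handle      = vxlan_tx_pkts,
        .rx_handle      = vxlan_rx_pkts,   /* now takes an int vid */
    };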
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 28c17afd..3aff2cc8 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,26 +62,9 @@
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
-/*
- * Calculate the number of buffers needed per port
- */
-#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \
- (num_switching_cores*MAX_PKT_BURST) + \
- (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
- ((num_switching_cores+1)*MBUF_CACHE_SIZE))
-
#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
-/*
- * No frame data buffer allocated from host are required for zero copy
- * implementation, guest will allocate the frame data buffer, and vhost
- * directly use it.
- */
-#define VIRTIO_DESCRIPTOR_LEN_ZCP RTE_MBUF_DEFAULT_DATAROOM
-#define MBUF_DATA_SIZE_ZCP RTE_MBUF_DEFAULT_BUF_SIZE
-#define MBUF_CACHE_SIZE_ZCP 0
-
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@@ -95,33 +78,10 @@
#define DEVICE_RX 1
#define DEVICE_SAFE_REMOVE 2
-/* Config_core_flag status definitions. */
-#define REQUEST_DEV_REMOVAL 1
-#define ACK_DEV_REMOVAL 0
-
/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512
-/*
- * Need refine these 2 macros for legacy and DPDK based front end:
- * Max vring avail descriptor/entries from guest - MAX_PKT_BURST
- * And then adjust power 2.
- */
-/*
- * For legacy front end, 128 descriptors,
- * half for virtio header, another half for mbuf.
- */
-#define RTE_TEST_RX_DESC_DEFAULT_ZCP 32 /* legacy: 32, DPDK virt FE: 128. */
-#define RTE_TEST_TX_DESC_DEFAULT_ZCP 64 /* legacy: 64, DPDK virt FE: 64. */
-
-/* Get first 4 bytes in mbuf headroom. */
-#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
- + sizeof(struct rte_mbuf)))
-
-/* true if x is a power of 2 */
-#define POWEROF2(x) ((((x)-1) & (x)) == 0)
-
#define INVALID_PORT_ID 0xFF
/* Max number of devices. Limited by vmdq. */
@@ -136,50 +96,22 @@
/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
-/* Used to compare MAC addresses. */
-#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
-
-/* Number of descriptors per cacheline. */
-#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
-
-#define MBUF_EXT_MEM(mb) (rte_mbuf_from_indirect(mb) != (mb))
-
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
/* Promiscuous mode */
static uint32_t promiscuous;
-/*Number of switching cores enabled*/
-static uint32_t num_switching_cores = 0;
-
/* number of devices/queues to support*/
static uint32_t num_queues = 0;
static uint32_t num_devices;
-/*
- * Enable zero copy, pkts buffer will directly dma to hw descriptor,
- * disabled on default.
- */
-static uint32_t zero_copy;
+static struct rte_mempool *mbuf_pool;
static int mergeable;
/* Do vlan strip on host, enabled on default */
static uint32_t vlan_strip = 1;
-/* number of descriptors to apply*/
-static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;
-static uint32_t num_tx_descriptor = RTE_TEST_TX_DESC_DEFAULT_ZCP;
-
-/* max ring descriptor, ixgbe, i40e, e1000 all are 4096. */
-#define MAX_RING_DESC 4096
-
-struct vpool {
- struct rte_mempool *pool;
- struct rte_ring *ring;
- uint32_t buf_size;
-} vpool_array[MAX_QUEUES+MAX_QUEUES];
-
/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
typedef enum {
VM2VM_DISABLED = 0,
@@ -189,14 +121,6 @@ typedef enum {
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
-/* The type of host physical address translated from guest physical address. */
-typedef enum {
- PHYS_ADDR_CONTINUOUS = 0,
- PHYS_ADDR_CROSS_SUBREG = 1,
- PHYS_ADDR_INVALID = 2,
- PHYS_ADDR_LAST
-} hpa_type;
-
/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
@@ -208,6 +132,8 @@ static uint32_t enable_tx_csum;
/* Disable TSO offload */
static uint32_t enable_tso;
+static int client_mode;
+
/* Specify timeout (in useconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
@@ -259,7 +185,6 @@ static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
-static const uint16_t external_pkt_default_vlan_tag = 2000;
const uint16_t vlan_tags[] = {
1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
@@ -274,11 +199,9 @@ const uint16_t vlan_tags[] = {
/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
-/* heads for the main used and free linked lists for the data path. */
-static struct virtio_net_data_ll *ll_root_used = NULL;
-static struct virtio_net_data_ll *ll_root_free = NULL;
+static struct vhost_dev_tailq_list vhost_dev_list =
+ TAILQ_HEAD_INITIALIZER(vhost_dev_list);
-/* Array of data core structures containing information on individual core linked lists. */
static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
@@ -291,32 +214,9 @@ struct mbuf_table {
/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
-/* TX queue fori each virtio device for zero copy. */
-struct mbuf_table tx_queue_zcp[MAX_QUEUES];
-
-/* Vlan header struct used to insert vlan tags on TX. */
-struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- __be16 h_vlan_encapsulated_proto;
-};
-
-/* Header lengths. */
+#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
+ / US_PER_S * BURST_TX_DRAIN_US)
#define VLAN_HLEN 4
-#define VLAN_ETH_HLEN 18
-
-/* Per-device statistics struct */
-struct device_statistics {
- uint64_t tx_total;
- rte_atomic64_t rx_total_atomic;
- uint64_t rx_total;
- uint64_t tx;
- rte_atomic64_t rx_atomic;
- uint64_t rx;
-} __rte_cache_aligned;
-struct device_statistics dev_statistics[MAX_DEVICES];
/*
* Builds up the correct configuration for VMDQ VLAN pool map
@@ -394,29 +294,12 @@ port_init(uint8_t port)
/* Enable vlan offload */
txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
- /*
- * Zero copy defers queue RX/TX start to the time when guest
- * finishes its startup and packet buffers from that guest are
- * available.
- */
- if (zero_copy) {
- rxconf->rx_deferred_start = 1;
- rxconf->rx_drop_en = 0;
- txconf->tx_deferred_start = 1;
- }
-
/*configure the number of supported virtio devices based on VMDQ limits */
num_devices = dev_info.max_vmdq_pools;
- if (zero_copy) {
- rx_ring_size = num_rx_descriptor;
- tx_ring_size = num_tx_descriptor;
- tx_rings = dev_info.max_tx_queues;
- } else {
- rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
- tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
- tx_rings = (uint16_t)rte_lcore_count();
- }
+ rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ tx_rings = (uint16_t)rte_lcore_count();
retval = validate_num_devices(MAX_DEVICES);
if (retval < 0)
@@ -457,7 +340,7 @@ port_init(uint8_t port)
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
rte_eth_dev_socket_id(port),
rxconf,
- vpool_array[q].pool);
+ mbuf_pool);
if (retval < 0)
return retval;
}
@@ -576,14 +459,9 @@ us_vhost_usage(const char *prgname)
" --vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
" --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
" --dev-basename: The basename to be used for the character device.\n"
- " --zero-copy [0|1]: disable(default)/enable rx/tx "
- "zero copy\n"
- " --rx-desc-num [0-N]: the number of descriptors on rx, "
- "used only when zero copy is enabled.\n"
- " --tx-desc-num [0-N]: the number of descriptors on tx, "
- "used only when zero copy is enabled.\n"
" --tx-csum [0|1] disable/enable TX checksum offload.\n"
- " --tso [0|1] disable/enable TCP segment offload.\n",
+ " --tso [0|1] disable/enable TCP segment offload.\n"
+ " --client register a vhost-user socket as client mode.\n",
prgname);
}
@@ -606,11 +484,9 @@ us_vhost_parse_args(int argc, char **argv)
{"vlan-strip", required_argument, NULL, 0},
{"stats", required_argument, NULL, 0},
{"dev-basename", required_argument, NULL, 0},
- {"zero-copy", required_argument, NULL, 0},
- {"rx-desc-num", required_argument, NULL, 0},
- {"tx-desc-num", required_argument, NULL, 0},
{"tx-csum", required_argument, NULL, 0},
{"tso", required_argument, NULL, 0},
+ {"client", no_argument, &client_mode, 1},
{NULL, 0, 0, 0},
};
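The new "--client" entry above uses getopt_long()'s flag-pointer form: for a no_argument option with a non-NULL flag, getopt_long() stores val (here 1) into *flag and returns 0, so no per-option case handling is needed. A standalone sketch:

    #include <getopt.h>
    #include <stdio.h>

    static int client_mode;

    int main(int argc, char **argv)
    {
        static const struct option opts[] = {
            {"client", no_argument, &client_mode, 1},
            {NULL, 0, 0, 0},
        };

        /* getopt_long() itself sets client_mode when --client is seen. */
        while (getopt_long(argc, argv, "", opts, NULL) != -1)
            ;

        printf("client_mode = %d\n", client_mode);
        return 0;
    }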
@@ -765,50 +641,6 @@ us_vhost_parse_args(int argc, char **argv)
}
}
- /* Enable/disable rx/tx zero copy. */
- if (!strncmp(long_option[option_index].name,
- "zero-copy", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument"
- " for zero-copy [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else
- zero_copy = ret;
- }
-
- /* Specify the descriptor number on RX. */
- if (!strncmp(long_option[option_index].name,
- "rx-desc-num", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, MAX_RING_DESC);
- if ((ret == -1) || (!POWEROF2(ret))) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for rx-desc-num[0-N],"
- "power of 2 required.\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- num_rx_descriptor = ret;
- }
- }
-
- /* Specify the descriptor number on TX. */
- if (!strncmp(long_option[option_index].name,
- "tx-desc-num", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, MAX_RING_DESC);
- if ((ret == -1) || (!POWEROF2(ret))) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "Invalid argument for tx-desc-num [0-N],"
- "power of 2 required.\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- num_tx_descriptor = ret;
- }
- }
-
break;
/* Invalid option - print options. */
@@ -829,21 +661,6 @@ us_vhost_parse_args(int argc, char **argv)
return -1;
}
- if ((zero_copy == 1) && (vm2vm_mode == VM2VM_SOFTWARE)) {
- RTE_LOG(INFO, VHOST_PORT,
- "Vhost zero copy doesn't support software vm2vm,"
- "please specify 'vm2vm 2' to use hardware vm2vm.\n");
- return -1;
- }
-
- if ((zero_copy == 1) && (vmdq_conf_default.rxmode.jumbo_frame == 1)) {
- RTE_LOG(INFO, VHOST_PORT,
- "Vhost zero copy doesn't support jumbo frame,"
- "please specify '--mergeable 0' to disable the "
- "mergeable feature.\n");
- return -1;
- }
-
return 0;
}
@@ -873,74 +690,18 @@ static unsigned check_ports_num(unsigned nb_ports)
return valid_num_ports;
}
-/*
- * Macro to print out packet contents. Wrapped in debug define so that the
- * data path is not effected when debug is disabled.
- */
-#ifdef DEBUG
-#define PRINT_PACKET(device, addr, size, header) do { \
- char *pkt_addr = (char*)(addr); \
- unsigned int index; \
- char packet[MAX_PRINT_BUFF]; \
- \
- if ((header)) \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
- else \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
- for (index = 0; index < (size); index++) { \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
- "%02hhx ", pkt_addr[index]); \
- } \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
- \
- LOG_DEBUG(VHOST_DATA, "%s", packet); \
-} while(0)
-#else
-#define PRINT_PACKET(device, addr, size, header) do{} while(0)
-#endif
-
-/*
- * Function to convert guest physical addresses to vhost physical addresses.
- * This is used to convert virtio buffer addresses.
- */
-static inline uint64_t __attribute__((always_inline))
-gpa_to_hpa(struct vhost_dev *vdev, uint64_t guest_pa,
- uint32_t buf_len, hpa_type *addr_type)
+static inline struct vhost_dev *__attribute__((always_inline))
+find_vhost_dev(struct ether_addr *mac)
{
- struct virtio_memory_regions_hpa *region;
- uint32_t regionidx;
- uint64_t vhost_pa = 0;
-
- *addr_type = PHYS_ADDR_INVALID;
-
- for (regionidx = 0; regionidx < vdev->nregions_hpa; regionidx++) {
- region = &vdev->regions_hpa[regionidx];
- if ((guest_pa >= region->guest_phys_address) &&
- (guest_pa <= region->guest_phys_address_end)) {
- vhost_pa = region->host_phys_addr_offset + guest_pa;
- if (likely((guest_pa + buf_len - 1)
- <= region->guest_phys_address_end))
- *addr_type = PHYS_ADDR_CONTINUOUS;
- else
- *addr_type = PHYS_ADDR_CROSS_SUBREG;
- break;
- }
- }
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| HPA %p\n",
- vdev->dev->device_fh, (void *)(uintptr_t)guest_pa,
- (void *)(uintptr_t)vhost_pa);
+ struct vhost_dev *vdev;
- return vhost_pa;
-}
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->ready == DEVICE_RX &&
+ is_same_ether_addr(mac, &vdev->mac_address))
+ return vdev;
+ }
-/*
- * Compares a packet destination MAC address to a device MAC address.
- */
-static inline int __attribute__((always_inline))
-ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
-{
- return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
+ return NULL;
}
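find_vhost_dev() above replaces the hand-rolled virtio_net_data_ll walk with a sys/queue.h tailq. A minimal standalone mirror of the pattern, with the struct reduced to the fields needed for illustration:

    #include <stddef.h>
    #include <sys/queue.h>

    struct vhost_dev {
        int vid;
        TAILQ_ENTRY(vhost_dev) global_vdev_entry; /* list linkage */
    };

    TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
    static struct vhost_dev_tailq_list vhost_dev_list =
        TAILQ_HEAD_INITIALIZER(vhost_dev_list);

    static struct vhost_dev *
    find_by_vid(int vid)
    {
        struct vhost_dev *vdev;

        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
            if (vdev->vid == vid)
                return vdev;
        }
        return NULL;
    }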
/*
@@ -951,32 +712,28 @@ static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
struct ether_hdr *pkt_hdr;
- struct virtio_net_data_ll *dev_ll;
- struct virtio_net *dev = vdev->dev;
int i, ret;
/* Learn MAC address of guest device from packet */
pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- dev_ll = ll_root_used;
-
- while (dev_ll != NULL) {
- if (ether_addr_cmp(&(pkt_hdr->s_addr), &dev_ll->vdev->mac_address)) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
- return -1;
- }
- dev_ll = dev_ll->next;
+ if (find_vhost_dev(&pkt_hdr->s_addr)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) device is using a registered MAC!\n",
+ vdev->vid);
+ return -1;
}
for (i = 0; i < ETHER_ADDR_LEN; i++)
vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
/* vlan_tag currently uses the device_id. */
- vdev->vlan_tag = vlan_tags[dev->device_fh];
+ vdev->vlan_tag = vlan_tags[vdev->vid];
/* Print out VMDQ registration info. */
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
- dev->device_fh,
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
+ vdev->vid,
vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
@@ -984,10 +741,11 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
/* Register the MAC address. */
ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
- (uint32_t)dev->device_fh + vmdq_pool_base);
+ (uint32_t)vdev->vid + vmdq_pool_base);
if (ret)
- RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
- dev->device_fh);
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) failed to add device MAC address to VMDQ\n",
+ vdev->vid);
/* Enable stripping of the vlan tag as we handle routing. */
if (vlan_strip)
@@ -1035,6 +793,21 @@ unlink_vmdq(struct vhost_dev *vdev)
}
}
+static inline void __attribute__((always_inline))
+virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+ struct rte_mbuf *m)
+{
+ uint16_t ret;
+
+ ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+ if (enable_stats) {
+ rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
+ rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
+ src_vdev->stats.tx_total++;
+ src_vdev->stats.tx += ret;
+ }
+}
+
/*
* Check if the packet destination MAC address is for a local device. If so then put
* the packet on that devices RX queue. If not then return.
@@ -1042,56 +815,33 @@ unlink_vmdq(struct vhost_dev *vdev)
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
- struct virtio_net_data_ll *dev_ll;
struct ether_hdr *pkt_hdr;
- uint64_t ret = 0;
- struct virtio_net *dev = vdev->dev;
- struct virtio_net *tdev; /* destination virito device */
+ struct vhost_dev *dst_vdev;
pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- /*get the used devices list*/
- dev_ll = ll_root_used;
+ dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+ if (!dst_vdev)
+ return -1;
- while (dev_ll != NULL) {
- if ((dev_ll->vdev->ready == DEVICE_RX) && ether_addr_cmp(&(pkt_hdr->d_addr),
- &dev_ll->vdev->mac_address)) {
+ if (vdev->vid == dst_vdev->vid) {
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) TX: src and dst MAC is same. Dropping packet.\n",
+ vdev->vid);
+ return 0;
+ }
- /* Drop the packet if the TX packet is destined for the TX device. */
- if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
- dev->device_fh);
- return 0;
- }
- tdev = dev_ll->vdev->dev;
-
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", tdev->device_fh);
-
- if (unlikely(dev_ll->vdev->remove)) {
- /*drop the packet if the device is marked for removal*/
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", tdev->device_fh);
- } else {
- /*send the packet to the local virtio device*/
- ret = rte_vhost_enqueue_burst(tdev, VIRTIO_RXQ, &m, 1);
- if (enable_stats) {
- rte_atomic64_add(
- &dev_statistics[tdev->device_fh].rx_total_atomic,
- 1);
- rte_atomic64_add(
- &dev_statistics[tdev->device_fh].rx_atomic,
- ret);
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx += ret;
- }
- }
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) TX: MAC address is local\n", dst_vdev->vid);
- return 0;
- }
- dev_ll = dev_ll->next;
+ if (unlikely(dst_vdev->remove)) {
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) device is marked for removal\n", dst_vdev->vid);
+ return 0;
}
- return -1;
+ virtio_xmit(dst_vdev, vdev, m);
+ return 0;
}
/*
@@ -1099,49 +849,35 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
* and get its vlan tag, and offset if it is.
*/
static inline int __attribute__((always_inline))
-find_local_dest(struct virtio_net *dev, struct rte_mbuf *m,
+find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
uint32_t *offset, uint16_t *vlan_tag)
{
- struct virtio_net_data_ll *dev_ll = ll_root_used;
+ struct vhost_dev *dst_vdev;
struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- while (dev_ll != NULL) {
- if ((dev_ll->vdev->ready == DEVICE_RX)
- && ether_addr_cmp(&(pkt_hdr->d_addr),
- &dev_ll->vdev->mac_address)) {
- /*
- * Drop the packet if the TX packet is
- * destined for the TX device.
- */
- if (dev_ll->vdev->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") TX: Source and destination"
- " MAC addresses are the same. Dropping "
- "packet.\n",
- dev_ll->vdev->dev->device_fh);
- return -1;
- }
-
- /*
- * HW vlan strip will reduce the packet length
- * by minus length of vlan tag, so need restore
- * the packet length by plus it.
- */
- *offset = VLAN_HLEN;
- *vlan_tag =
- (uint16_t)
- vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") TX: pkt to local VM device id:"
- "(%"PRIu64") vlan tag: %d.\n",
- dev->device_fh, dev_ll->vdev->dev->device_fh,
- (int)*vlan_tag);
+ dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+ if (!dst_vdev)
+ return 0;
- break;
- }
- dev_ll = dev_ll->next;
+ if (vdev->vid == dst_vdev->vid) {
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) TX: src and dst MAC is same. Dropping packet.\n",
+ vdev->vid);
+ return -1;
}
+
+ /*
+ * HW vlan strip reduces the packet length by the
+ * length of the vlan tag, so restore the packet
+ * length by adding it back.
+ */
+ *offset = VLAN_HLEN;
+ *vlan_tag = vlan_tags[vdev->vid];
+
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
+ vdev->vid, dst_vdev->vid, *vlan_tag);
+
return 0;
}
@@ -1173,20 +909,49 @@ static void virtio_tx_offload(struct rte_mbuf *m)
tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
+static inline void
+free_pkts(struct rte_mbuf **pkts, uint16_t n)
+{
+ while (n--)
+ rte_pktmbuf_free(pkts[n]);
+}
+
+static inline void __attribute__((always_inline))
+do_drain_mbuf_table(struct mbuf_table *tx_q)
+{
+ uint16_t count;
+
+ count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
+ tx_q->m_table, tx_q->len);
+ if (unlikely(count < tx_q->len))
+ free_pkts(&tx_q->m_table[count], tx_q->len - count);
+
+ tx_q->len = 0;
+}
+
/*
- * This function routes the TX packet to the correct interface. This may be a local device
- * or the physical port.
+ * This function routes the TX packet to the correct interface. This
+ * may be a local device or the physical port.
*/
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
struct mbuf_table *tx_q;
- struct rte_mbuf **m_table;
- unsigned len, ret, offset = 0;
+ unsigned offset = 0;
const uint16_t lcore_id = rte_lcore_id();
- struct virtio_net *dev = vdev->dev;
struct ether_hdr *nh;
+
+ nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
+ struct vhost_dev *vdev2;
+
+ TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
+ virtio_xmit(vdev2, vdev, m);
+ }
+ goto queue2nic;
+ }
+
/*check if destination is local VM*/
if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
rte_pktmbuf_free(m);
@@ -1194,17 +959,20 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
}
if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
- if (unlikely(find_local_dest(dev, m, &offset, &vlan_tag) != 0)) {
+ if (unlikely(find_local_dest(vdev, m, &offset,
+ &vlan_tag) != 0)) {
rte_pktmbuf_free(m);
return;
}
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "(%d) TX: MAC address is external\n", vdev->vid);
+
+queue2nic:
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
- len = tx_q->len;
nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
@@ -1242,1411 +1010,213 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
if (m->ol_flags & PKT_TX_TCP_SEG)
virtio_tx_offload(m);
- tx_q->m_table[len] = m;
- len++;
+ tx_q->m_table[tx_q->len++] = m;
if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
- }
-
- if (unlikely(len == MAX_PKT_BURST)) {
- m_table = (struct rte_mbuf **)tx_q->m_table;
- ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
- /* Free any buffers not handled by TX and update the port stats. */
- if (unlikely(ret < len)) {
- do {
- rte_pktmbuf_free(m_table[ret]);
- } while (++ret < len);
- }
-
- len = 0;
+ vdev->stats.tx_total++;
+ vdev->stats.tx++;
}
- tx_q->len = len;
- return;
+ if (unlikely(tx_q->len == MAX_PKT_BURST))
+ do_drain_mbuf_table(tx_q);
}
-/*
- * This function is called by each data core. It handles all RX/TX registered with the
- * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
- * with all devices in the main linked list.
- */
-static int
-switch_worker(__attribute__((unused)) void *arg)
-{
- struct rte_mempool *mbuf_pool = arg;
- struct virtio_net *dev = NULL;
- struct vhost_dev *vdev = NULL;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct virtio_net_data_ll *dev_ll;
- struct mbuf_table *tx_q;
- volatile struct lcore_ll_info *lcore_ll;
- const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
- uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
- unsigned ret, i;
- const uint16_t lcore_id = rte_lcore_id();
- const uint16_t num_cores = (uint16_t)rte_lcore_count();
- uint16_t rx_count = 0;
- uint16_t tx_count;
- uint32_t retry = 0;
-
- RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
- lcore_ll = lcore_info[lcore_id].lcore_ll;
- prev_tsc = 0;
-
- tx_q = &lcore_tx_queue[lcore_id];
- for (i = 0; i < num_cores; i ++) {
- if (lcore_ids[i] == lcore_id) {
- tx_q->txq_id = i;
- break;
- }
- }
-
- while(1) {
- cur_tsc = rte_rdtsc();
- /*
- * TX burst queue drain
- */
- diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
-
- if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
-
- /*Tx any packets in the queue*/
- ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
- (struct rte_mbuf **)tx_q->m_table,
- (uint16_t)tx_q->len);
- if (unlikely(ret < tx_q->len)) {
- do {
- rte_pktmbuf_free(tx_q->m_table[ret]);
- } while (++ret < tx_q->len);
- }
-
- tx_q->len = 0;
- }
- prev_tsc = cur_tsc;
- }
-
- rte_prefetch0(lcore_ll->ll_root_used);
- /*
- * Inform the configuration core that we have exited the linked list and that no devices are
- * in use if requested.
- */
- if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
- lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
-
- /*
- * Process devices
- */
- dev_ll = lcore_ll->ll_root_used;
-
- while (dev_ll != NULL) {
- /*get virtio device ID*/
- vdev = dev_ll->vdev;
- dev = vdev->dev;
-
- if (unlikely(vdev->remove)) {
- dev_ll = dev_ll->next;
- unlink_vmdq(vdev);
- vdev->ready = DEVICE_SAFE_REMOVE;
- continue;
- }
- if (likely(vdev->ready == DEVICE_RX)) {
- /*Handle guest RX*/
- rx_count = rte_eth_rx_burst(ports[0],
- vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
-
- if (rx_count) {
- /*
- * Retry is enabled and the queue is full then we wait and retry to avoid packet loss
- * Here MAX_PKT_BURST must be less than virtio queue size
- */
- if (enable_retry && unlikely(rx_count > rte_vring_available_entries(dev, VIRTIO_RXQ))) {
- for (retry = 0; retry < burst_rx_retry_num; retry++) {
- rte_delay_us(burst_rx_delay_time);
- if (rx_count <= rte_vring_available_entries(dev, VIRTIO_RXQ))
- break;
- }
- }
- ret_count = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_burst, rx_count);
- if (enable_stats) {
- rte_atomic64_add(
- &dev_statistics[dev_ll->vdev->dev->device_fh].rx_total_atomic,
- rx_count);
- rte_atomic64_add(
- &dev_statistics[dev_ll->vdev->dev->device_fh].rx_atomic, ret_count);
- }
- while (likely(rx_count)) {
- rx_count--;
- rte_pktmbuf_free(pkts_burst[rx_count]);
- }
+static inline void __attribute__((always_inline))
+drain_mbuf_table(struct mbuf_table *tx_q)
+{
+ static uint64_t prev_tsc;
+ uint64_t cur_tsc;
- }
- }
+ if (tx_q->len == 0)
+ return;
- if (likely(!vdev->remove)) {
- /* Handle guest TX*/
- tx_count = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts_burst, MAX_PKT_BURST);
- /* If this is the first received packet we need to learn the MAC and setup VMDQ */
- if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && tx_count) {
- if (vdev->remove || (link_vmdq(vdev, pkts_burst[0]) == -1)) {
- while (tx_count)
- rte_pktmbuf_free(pkts_burst[--tx_count]);
- }
- }
- for (i = 0; i < tx_count; ++i) {
- virtio_tx_route(vdev, pkts_burst[i],
- vlan_tags[(uint16_t)dev->device_fh]);
- }
- }
+ cur_tsc = rte_rdtsc();
+ if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
+ prev_tsc = cur_tsc;
- /*move to the next device in the list*/
- dev_ll = dev_ll->next;
- }
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "TX queue drained after timeout with burst size %u\n",
+ tx_q->len);
+ do_drain_mbuf_table(tx_q);
}
-
- return 0;
}
-/*
- * This function gets available ring number for zero copy rx.
- * Only one thread will call this funciton for a paticular virtio device,
- * so, it is designed as non-thread-safe function.
- */
-static inline uint32_t __attribute__((always_inline))
-get_available_ring_num_zcp(struct virtio_net *dev)
-{
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
- uint16_t avail_idx;
-
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- return (uint32_t)(avail_idx - vq->last_used_idx_res);
-}
-
-/*
- * This function gets available ring index for zero copy rx,
- * it will retry 'burst_rx_retry_num' times till it get enough ring index.
- * Only one thread will call this funciton for a paticular virtio device,
- * so, it is designed as non-thread-safe function.
- */
-static inline uint32_t __attribute__((always_inline))
-get_available_ring_index_zcp(struct virtio_net *dev,
- uint16_t *res_base_idx, uint32_t count)
+static inline void __attribute__((always_inline))
+drain_eth_rx(struct vhost_dev *vdev)
{
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
- uint16_t avail_idx;
- uint32_t retry = 0;
- uint16_t free_entries;
-
- *res_base_idx = vq->last_used_idx_res;
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- free_entries = (avail_idx - *res_base_idx);
+ uint16_t rx_count, enqueue_count;
+ struct rte_mbuf *pkts[MAX_PKT_BURST];
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") in get_available_ring_index_zcp: "
- "avail idx: %d, "
- "res base idx:%d, free entries:%d\n",
- dev->device_fh, avail_idx, *res_base_idx,
- free_entries);
+ rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
+ pkts, MAX_PKT_BURST);
+ if (!rx_count)
+ return;
/*
- * If retry is enabled and the queue is full then we wait
- * and retry to avoid packet loss.
+ * When "enable_retry" is set, here we wait and retry when there
+ * is no enough free slots in the queue to hold @rx_count packets,
+ * to diminish packet loss.
*/
- if (enable_retry && unlikely(count > free_entries)) {
+ if (enable_retry &&
+ unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
+ VIRTIO_RXQ))) {
+ uint32_t retry;
+
for (retry = 0; retry < burst_rx_retry_num; retry++) {
rte_delay_us(burst_rx_delay_time);
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- free_entries = (avail_idx - *res_base_idx);
- if (count <= free_entries)
+ if (rx_count <= rte_vhost_avail_entries(vdev->vid,
+ VIRTIO_RXQ))
break;
}
}
- /*check that we have enough buffers*/
- if (unlikely(count > free_entries))
- count = free_entries;
-
- if (unlikely(count == 0)) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") Fail in get_available_ring_index_zcp: "
- "avail idx: %d, res base idx:%d, free entries:%d\n",
- dev->device_fh, avail_idx,
- *res_base_idx, free_entries);
- return 0;
+ enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+ pkts, rx_count);
+ if (enable_stats) {
+ rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
+ rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
}
- vq->last_used_idx_res = *res_base_idx + count;
-
- return count;
+ free_pkts(pkts, rx_count);
}
-/*
- * This function put descriptor back to used list.
- */
static inline void __attribute__((always_inline))
-put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx)
+drain_virtio_tx(struct vhost_dev *vdev)
{
- uint16_t res_cur_idx = vq->last_used_idx;
- vq->used->ring[res_cur_idx & (vq->size - 1)].id = (uint32_t)desc_idx;
- vq->used->ring[res_cur_idx & (vq->size - 1)].len = 0;
- rte_compiler_barrier();
- *(volatile uint16_t *)&vq->used->idx += 1;
- vq->last_used_idx += 1;
-
- /* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write(vq->callfd, (eventfd_t)1);
-}
+ struct rte_mbuf *pkts[MAX_PKT_BURST];
+ uint16_t count;
+ uint16_t i;
-/*
- * This function get available descriptor from vitio vring and un-attached mbuf
- * from vpool->ring, and then attach them together. It needs adjust the offset
- * for buff_addr and phys_addr accroding to PMD implementation, otherwise the
- * frame data may be put to wrong location in mbuf.
- */
-static inline void __attribute__((always_inline))
-attach_rxmbuf_zcp(struct virtio_net *dev)
-{
- uint16_t res_base_idx, desc_idx;
- uint64_t buff_addr, phys_addr;
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- void *obj = NULL;
- struct rte_mbuf *mbuf;
- struct vpool *vpool;
- hpa_type addr_type;
- struct vhost_dev *vdev = (struct vhost_dev *)dev->priv;
-
- vpool = &vpool_array[vdev->vmdq_rx_q];
- vq = dev->virtqueue[VIRTIO_RXQ];
-
- do {
- if (unlikely(get_available_ring_index_zcp(vdev->dev, &res_base_idx,
- 1) != 1))
- return;
- desc_idx = vq->avail->ring[(res_base_idx) & (vq->size - 1)];
-
- desc = &vq->desc[desc_idx];
- if (desc->flags & VRING_DESC_F_NEXT) {
- desc = &vq->desc[desc->next];
- buff_addr = gpa_to_vva(dev, desc->addr);
- phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len,
- &addr_type);
- } else {
- buff_addr = gpa_to_vva(dev,
- desc->addr + vq->vhost_hlen);
- phys_addr = gpa_to_hpa(vdev,
- desc->addr + vq->vhost_hlen,
- desc->len, &addr_type);
- }
+ count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
+ pkts, MAX_PKT_BURST);
- if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
- RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Invalid frame buffer"
- " address found when attaching RX frame buffer"
- " address!\n", dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- continue;
- }
-
- /*
- * Check if the frame buffer address from guest crosses
- * sub-region or not.
- */
- if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Frame buffer address cross "
- "sub-regioin found when attaching RX frame "
- "buffer address!\n",
- dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- continue;
- }
- } while (unlikely(phys_addr == 0));
-
- rte_ring_sc_dequeue(vpool->ring, &obj);
- mbuf = obj;
- if (unlikely(mbuf == NULL)) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in attach_rxmbuf_zcp: "
- "ring_sc_dequeue fail.\n",
- dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- return;
- }
-
- if (unlikely(vpool->buf_size > desc->len)) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in attach_rxmbuf_zcp: frame buffer "
- "length(%d) of descriptor idx: %d less than room "
- "size required: %d\n",
- dev->device_fh, desc->len, desc_idx, vpool->buf_size);
- put_desc_to_used_list_zcp(vq, desc_idx);
- rte_ring_sp_enqueue(vpool->ring, obj);
- return;
+ /* setup VMDq for the first packet */
+ if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
+ if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
+ free_pkts(pkts, count);
}
- mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);
- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;
- mbuf->data_len = desc->len;
- MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in attach_rxmbuf_zcp: res base idx:%d, "
- "descriptor idx:%d\n",
- dev->device_fh, res_base_idx, desc_idx);
-
- __rte_mbuf_raw_free(mbuf);
-
- return;
+ for (i = 0; i < count; ++i)
+ virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
- * Detach an attched packet mbuf -
- * - restore original mbuf address and length values.
- * - reset pktmbuf data and data_len to their default values.
- * All other fields of the given packet mbuf will be left intact.
+ * Main function of vhost-switch. It basically does:
*
- * @param m
- * The attached packet mbuf.
- */
-static inline void pktmbuf_detach_zcp(struct rte_mbuf *m)
-{
- const struct rte_mempool *mp = m->pool;
- void *buf = rte_mbuf_to_baddr(m);
- uint32_t buf_ofs;
- uint32_t buf_len = mp->elt_size - sizeof(*m);
- m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(*m);
-
- m->buf_addr = buf;
- m->buf_len = (uint16_t)buf_len;
-
- buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
- RTE_PKTMBUF_HEADROOM : m->buf_len;
- m->data_off = buf_ofs;
-
- m->data_len = 0;
-}
-
-/*
- * This function is called after packets have been transimited. It fetchs mbuf
- * from vpool->pool, detached it and put into vpool->ring. It also update the
- * used index and kick the guest if necessary.
- */
-static inline uint32_t __attribute__((always_inline))
-txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
-{
- struct rte_mbuf *mbuf;
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
- uint32_t used_idx = vq->last_used_idx & (vq->size - 1);
- uint32_t index = 0;
- uint32_t mbuf_count = rte_mempool_count(vpool->pool);
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool before "
- "clean is: %d\n",
- dev->device_fh, mbuf_count);
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring before "
- "clean is : %d\n",
- dev->device_fh, rte_ring_count(vpool->ring));
-
- for (index = 0; index < mbuf_count; index++) {
- mbuf = __rte_mbuf_raw_alloc(vpool->pool);
- if (likely(MBUF_EXT_MEM(mbuf)))
- pktmbuf_detach_zcp(mbuf);
- rte_ring_sp_enqueue(vpool->ring, mbuf);
-
- /* Update used index buffer information. */
- vq->used->ring[used_idx].id = MBUF_HEADROOM_UINT32(mbuf);
- vq->used->ring[used_idx].len = 0;
-
- used_idx = (used_idx + 1) & (vq->size - 1);
- }
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after "
- "clean is: %d\n",
- dev->device_fh, rte_mempool_count(vpool->pool));
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after "
- "clean is : %d\n",
- dev->device_fh, rte_ring_count(vpool->ring));
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: before updated "
- "vq->last_used_idx:%d\n",
- dev->device_fh, vq->last_used_idx);
-
- vq->last_used_idx += mbuf_count;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: after updated "
- "vq->last_used_idx:%d\n",
- dev->device_fh, vq->last_used_idx);
-
- rte_compiler_barrier();
-
- *(volatile uint16_t *)&vq->used->idx += mbuf_count;
-
- /* Kick guest if required. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write(vq->callfd, (eventfd_t)1);
-
- return 0;
-}
-
-/*
- * This function is called when a virtio device is destroy.
- * It fetchs mbuf from vpool->pool, and detached it, and put into vpool->ring.
- */
-static void mbuf_destroy_zcp(struct vpool *vpool)
-{
- struct rte_mbuf *mbuf = NULL;
- uint32_t index, mbuf_count = rte_mempool_count(vpool->pool);
-
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in mempool before "
- "mbuf_destroy_zcp is: %d\n",
- mbuf_count);
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in ring before "
- "mbuf_destroy_zcp is : %d\n",
- rte_ring_count(vpool->ring));
-
- for (index = 0; index < mbuf_count; index++) {
- mbuf = __rte_mbuf_raw_alloc(vpool->pool);
- if (likely(mbuf != NULL)) {
- if (likely(MBUF_EXT_MEM(mbuf)))
- pktmbuf_detach_zcp(mbuf);
- rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
- }
- }
-
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in mempool after "
- "mbuf_destroy_zcp is: %d\n",
- rte_mempool_count(vpool->pool));
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in ring after "
- "mbuf_destroy_zcp is : %d\n",
- rte_ring_count(vpool->ring));
-}
-
-/*
- * This function update the use flag and counter.
- */
-static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
- uint32_t count)
-{
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- struct rte_mbuf *buff;
- /* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr
- = {{0, 0, 0, 0, 0, 0}, 0};
- uint64_t buff_hdr_addr = 0;
- uint32_t head[MAX_PKT_BURST], packet_len = 0;
- uint32_t head_idx, packet_success = 0;
- uint16_t res_cur_idx;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
-
- if (count == 0)
- return 0;
-
- vq = dev->virtqueue[VIRTIO_RXQ];
- count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
-
- res_cur_idx = vq->last_used_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
- dev->device_fh, res_cur_idx, res_cur_idx + count);
-
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = MBUF_HEADROOM_UINT32(pkts[head_idx]);
-
- /*Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
-
- while (packet_success != count) {
- /* Get descriptor from available ring */
- desc = &vq->desc[head[packet_success]];
-
- buff = pkts[packet_success];
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in dev_rx_zcp: update the used idx for "
- "pkt[%d] descriptor idx: %d\n",
- dev->device_fh, packet_success,
- MBUF_HEADROOM_UINT32(buff));
-
- PRINT_PACKET(dev,
- (uintptr_t)(((uint64_t)(uintptr_t)buff->buf_addr)
- + RTE_PKTMBUF_HEADROOM),
- rte_pktmbuf_data_len(buff), 0);
-
- /* Buffer address translation for virtio header. */
- buff_hdr_addr = gpa_to_vva(dev, desc->addr);
- packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
-
- /*
- * If the descriptors are chained the header and data are
- * placed in separate buffers.
- */
- if (desc->flags & VRING_DESC_F_NEXT) {
- desc->len = vq->vhost_hlen;
- desc = &vq->desc[desc->next];
- desc->len = rte_pktmbuf_data_len(buff);
- } else {
- desc->len = packet_len;
- }
-
- /* Update used ring with desc information */
- vq->used->ring[res_cur_idx & (vq->size - 1)].id
- = head[packet_success];
- vq->used->ring[res_cur_idx & (vq->size - 1)].len
- = packet_len;
- res_cur_idx++;
- packet_success++;
-
- /* A header is required per buffer. */
- rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
- (const void *)&virtio_hdr, vq->vhost_hlen);
-
- PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);
-
- if (likely(packet_success < count)) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
- }
- }
-
- rte_compiler_barrier();
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in dev_rx_zcp: before update used idx: "
- "vq.last_used_idx: %d, vq->used->idx: %d\n",
- dev->device_fh, vq->last_used_idx, vq->used->idx);
-
- *(volatile uint16_t *)&vq->used->idx += count;
- vq->last_used_idx += count;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in dev_rx_zcp: after update used idx: "
- "vq.last_used_idx: %d, vq->used->idx: %d\n",
- dev->device_fh, vq->last_used_idx, vq->used->idx);
-
- /* Kick the guest if necessary. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write(vq->callfd, (eventfd_t)1);
-
- return count;
-}
-
-/*
- * This function routes the TX packet to the correct interface.
- * This may be a local device or the physical port.
+ * for each vhost device {
+ * - drain_eth_rx()
+ *
+ * Which drains the host eth Rx queue linked to the vhost device,
+ * and delivers the packets to the guest virtio Rx ring associated
+ * with this vhost device.
+ *
+ * - drain_virtio_tx()
+ *
+ * Which drains the guest virtio Tx queue and delivers the packets
+ * to the target, which could be another vhost device or the
+ * physical eth dev. The routing is done in virtio_tx_route().
+ * }
*/
-static inline void __attribute__((always_inline))
-virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
- uint32_t desc_idx, uint8_t need_copy)
+static int
+switch_worker(void *arg __rte_unused)
{
+ unsigned i;
+ unsigned lcore_id = rte_lcore_id();
+ struct vhost_dev *vdev;
struct mbuf_table *tx_q;
- struct rte_mbuf **m_table;
- void *obj = NULL;
- struct rte_mbuf *mbuf;
- unsigned len, ret, offset = 0;
- struct vpool *vpool;
- uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
- uint16_t vmdq_rx_q = ((struct vhost_dev *)dev->priv)->vmdq_rx_q;
-
- /*Add packet to the port tx queue*/
- tx_q = &tx_queue_zcp[vmdq_rx_q];
- len = tx_q->len;
-
- /* Allocate an mbuf and populate the structure. */
- vpool = &vpool_array[MAX_QUEUES + vmdq_rx_q];
- rte_ring_sc_dequeue(vpool->ring, &obj);
- mbuf = obj;
- if (unlikely(mbuf == NULL)) {
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Failed to allocate memory for mbuf.\n",
- dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- return;
- }
-
- if (vm2vm_mode == VM2VM_HARDWARE) {
- /* Avoid using a vlan tag from any vm for an external pkt; e.g.
- * vlan_tags[dev->device_fh]. Otherwise it conflicts with pool
- * selection: the MAC address marks it as an external pkt that
- * should go to the network, while the vlan tag marks it as a
- * vm2vm pkt to be forwarded to another vm. Hardware cannot
- * resolve such an ambiguity, so the pkt would be lost.
- */
- vlan_tag = external_pkt_default_vlan_tag;
- if (find_local_dest(dev, m, &offset, &vlan_tag) != 0) {
- MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
- __rte_mbuf_raw_free(mbuf);
- return;
- }
- }
-
- mbuf->nb_segs = m->nb_segs;
- mbuf->next = m->next;
- mbuf->data_len = m->data_len + offset;
- mbuf->pkt_len = mbuf->data_len;
- if (unlikely(need_copy)) {
- /* Copy the packet contents to the mbuf. */
- rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
- rte_pktmbuf_mtod(m, void *),
- m->data_len);
- } else {
- mbuf->data_off = m->data_off;
- mbuf->buf_physaddr = m->buf_physaddr;
- mbuf->buf_addr = m->buf_addr;
- }
- mbuf->ol_flags |= PKT_TX_VLAN_PKT;
- mbuf->vlan_tci = vlan_tag;
- mbuf->l2_len = sizeof(struct ether_hdr);
- mbuf->l3_len = sizeof(struct ipv4_hdr);
- MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
-
- tx_q->m_table[len] = mbuf;
- len++;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
- dev->device_fh,
- mbuf->nb_segs,
- (mbuf->next == NULL) ? "null" : "non-null");
-
- if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
- }
- if (unlikely(len == MAX_PKT_BURST)) {
- m_table = (struct rte_mbuf **)tx_q->m_table;
- ret = rte_eth_tx_burst(ports[0],
- (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
+ RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
- /*
- * Free any buffers not handled by TX and update
- * the port stats.
- */
- if (unlikely(ret < len)) {
- do {
- rte_pktmbuf_free(m_table[ret]);
- } while (++ret < len);
+ tx_q = &lcore_tx_queue[lcore_id];
+ for (i = 0; i < rte_lcore_count(); i++) {
+ if (lcore_ids[i] == lcore_id) {
+ tx_q->txq_id = i;
+ break;
}
-
- len = 0;
- txmbuf_clean_zcp(dev, vpool);
}
- tx_q->len = len;
-
- return;
-}
-
-/*
- * This function TX all available packets in virtio TX queue for one
- * virtio-net device. If it is first packet, it learns MAC address and
- * setup VMDQ.
- */
-static inline void __attribute__((always_inline))
-virtio_dev_tx_zcp(struct virtio_net *dev)
-{
- struct rte_mbuf m;
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- uint64_t buff_addr = 0, phys_addr;
- uint32_t head[MAX_PKT_BURST];
- uint32_t i;
- uint16_t free_entries, packet_success = 0;
- uint16_t avail_idx;
- uint8_t need_copy = 0;
- hpa_type addr_type;
- struct vhost_dev *vdev = (struct vhost_dev *)dev->priv;
-
- vq = dev->virtqueue[VIRTIO_TXQ];
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- /* If there are no available buffers then return. */
- if (vq->last_used_idx_res == avail_idx)
- return;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
-
- /* Prefetch available ring to retrieve head indexes. */
- rte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]);
-
- /* Get the number of free entries in the ring */
- free_entries = (avail_idx - vq->last_used_idx_res);
-
- /* Limit to MAX_PKT_BURST. */
- free_entries
- = (free_entries > MAX_PKT_BURST) ? MAX_PKT_BURST : free_entries;
-
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
- dev->device_fh, free_entries);
-
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (i = 0; i < free_entries; i++)
- head[i]
- = vq->avail->ring[(vq->last_used_idx_res + i)
- & (vq->size - 1)];
-
- vq->last_used_idx_res += free_entries;
-
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
- rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);
-
- while (packet_success < free_entries) {
- desc = &vq->desc[head[packet_success]];
-
- /* Discard first buffer as it is the virtio header */
- desc = &vq->desc[desc->next];
-
- /* Buffer address translation. */
- buff_addr = gpa_to_vva(dev, desc->addr);
- /* Need check extra VLAN_HLEN size for inserting VLAN tag */
- phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len + VLAN_HLEN,
- &addr_type);
-
- if (likely(packet_success < (free_entries - 1)))
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success + 1]]);
-
- if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Invalid frame buffer address found"
- "when TX packets!\n",
- dev->device_fh);
- packet_success++;
- continue;
- }
-
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)buff_addr);
-
- /*
- * Setup dummy mbuf. This is copied to a real mbuf if
- * transmitted out the physical port.
- */
- m.data_len = desc->len;
- m.nb_segs = 1;
- m.next = NULL;
- m.data_off = 0;
- m.buf_addr = (void *)(uintptr_t)buff_addr;
- m.buf_physaddr = phys_addr;
-
- /*
- * Check if the frame buffer address from guest crosses
- * sub-region or not.
- */
- if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Frame buffer address cross "
- "sub-regioin found when attaching TX frame "
- "buffer address!\n",
- dev->device_fh);
- need_copy = 1;
- } else
- need_copy = 0;
-
- PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);
+ while(1) {
+ drain_mbuf_table(tx_q);
/*
- * If this is the first received packet we need to learn
- * the MAC and setup VMDQ
+ * Inform the configuration core that we have exited the
+ * linked list and that no devices are in use if requested.
*/
- if (unlikely(vdev->ready == DEVICE_MAC_LEARNING)) {
- if (vdev->remove || (link_vmdq(vdev, &m) == -1)) {
- /*
- * Discard frame if device is scheduled for
- * removal or a duplicate MAC address is found.
- */
- packet_success += free_entries;
- vq->last_used_idx += packet_success;
- break;
- }
- }
-
- virtio_tx_route_zcp(dev, &m, head[packet_success], need_copy);
- packet_success++;
- }
-}
-
-/*
- * This function is called by each data core. It handles all RX/TX registered
- * with the core. For TX the specific lcore linked list is used. For RX, MAC
- * addresses are compared with all devices in the main linked list.
- */
-static int
-switch_worker_zcp(__attribute__((unused)) void *arg)
-{
- struct virtio_net *dev = NULL;
- struct vhost_dev *vdev = NULL;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct virtio_net_data_ll *dev_ll;
- struct mbuf_table *tx_q;
- volatile struct lcore_ll_info *lcore_ll;
- const uint64_t drain_tsc
- = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S
- * BURST_TX_DRAIN_US;
- uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
- unsigned ret;
- const uint16_t lcore_id = rte_lcore_id();
- uint16_t count_in_ring, rx_count = 0;
-
- RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
-
- lcore_ll = lcore_info[lcore_id].lcore_ll;
- prev_tsc = 0;
-
- while (1) {
- cur_tsc = rte_rdtsc();
-
- /* TX burst queue drain */
- diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
- /*
- * Get mbuf from vpool.pool and detach mbuf and
- * put back into vpool.ring.
- */
- dev_ll = lcore_ll->ll_root_used;
- while ((dev_ll != NULL) && (dev_ll->vdev != NULL)) {
- /* Get virtio device ID */
- vdev = dev_ll->vdev;
- dev = vdev->dev;
-
- if (likely(!vdev->remove)) {
- tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q];
- if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA,
- "TX queue drained after timeout"
- " with burst size %u\n",
- tx_q->len);
-
- /*
- * Tx any packets in the queue
- */
- ret = rte_eth_tx_burst(
- ports[0],
- (uint16_t)tx_q->txq_id,
- (struct rte_mbuf **)
- tx_q->m_table,
- (uint16_t)tx_q->len);
- if (unlikely(ret < tx_q->len)) {
- do {
- rte_pktmbuf_free(
- tx_q->m_table[ret]);
- } while (++ret < tx_q->len);
- }
- tx_q->len = 0;
-
- txmbuf_clean_zcp(dev,
- &vpool_array[MAX_QUEUES+vdev->vmdq_rx_q]);
- }
- }
- dev_ll = dev_ll->next;
- }
- prev_tsc = cur_tsc;
- }
-
- rte_prefetch0(lcore_ll->ll_root_used);
+ if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
+ lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
/*
- * Inform the configuration core that we have exited the linked
- * list and that no devices are in use if requested.
+ * Process vhost devices
*/
- if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
- lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
-
- /* Process devices */
- dev_ll = lcore_ll->ll_root_used;
-
- while ((dev_ll != NULL) && (dev_ll->vdev != NULL)) {
- vdev = dev_ll->vdev;
- dev = vdev->dev;
+ TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
+ lcore_vdev_entry) {
if (unlikely(vdev->remove)) {
- dev_ll = dev_ll->next;
unlink_vmdq(vdev);
vdev->ready = DEVICE_SAFE_REMOVE;
continue;
}
- if (likely(vdev->ready == DEVICE_RX)) {
- uint32_t index = vdev->vmdq_rx_q;
- uint16_t i;
- count_in_ring
- = rte_ring_count(vpool_array[index].ring);
- uint16_t free_entries
- = (uint16_t)get_available_ring_num_zcp(dev);
-
- /*
- * Attach all mbufs in vpool.ring and put back
- * into vpool.pool.
- */
- for (i = 0;
- i < RTE_MIN(free_entries,
- RTE_MIN(count_in_ring, MAX_PKT_BURST));
- i++)
- attach_rxmbuf_zcp(dev);
-
- /* Handle guest RX */
- rx_count = rte_eth_rx_burst(ports[0],
- vdev->vmdq_rx_q, pkts_burst,
- MAX_PKT_BURST);
-
- if (rx_count) {
- ret_count = virtio_dev_rx_zcp(dev,
- pkts_burst, rx_count);
- if (enable_stats) {
- dev_statistics[dev->device_fh].rx_total
- += rx_count;
- dev_statistics[dev->device_fh].rx
- += ret_count;
- }
- while (likely(rx_count)) {
- rx_count--;
- pktmbuf_detach_zcp(
- pkts_burst[rx_count]);
- rte_ring_sp_enqueue(
- vpool_array[index].ring,
- (void *)pkts_burst[rx_count]);
- }
- }
- }
+ if (likely(vdev->ready == DEVICE_RX))
+ drain_eth_rx(vdev);
if (likely(!vdev->remove))
- /* Handle guest TX */
- virtio_dev_tx_zcp(dev);
-
- /* Move to the next device in the list */
- dev_ll = dev_ll->next;
+ drain_virtio_tx(vdev);
}
}
return 0;
}
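
The drain_eth_rx() helper named in the comment above switch_worker() is not shown in this hunk. A minimal sketch of its likely shape, assuming the 16.07 rte_vhost_enqueue_burst() API and the vdev fields used elsewhere in this file:

	static inline void
	drain_eth_rx(struct vhost_dev *vdev)
	{
		struct rte_mbuf *pkts[MAX_PKT_BURST];
		uint16_t rx_count, enqueue_count;

		/* Pull a burst from the VMDq Rx queue dedicated to this device. */
		rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
					    pkts, MAX_PKT_BURST);
		if (!rx_count)
			return;

		/* Deliver the burst to the guest virtio Rx ring. */
		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
							pkts, rx_count);
		if (enable_stats) {
			rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
			rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
		}

		/* The enqueue copied the data; the host mbufs can be freed. */
		while (rx_count)
			rte_pktmbuf_free(pkts[--rx_count]);
	}
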
-
-/*
- * Add an entry to a used linked list. A free entry must first be found
- * in the free linked list using get_data_ll_free_entry();
- */
-static void
-add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
- struct virtio_net_data_ll *ll_dev)
-{
- struct virtio_net_data_ll *ll = *ll_root_addr;
-
- /* Set next as NULL and use a compiler barrier to avoid reordering. */
- ll_dev->next = NULL;
- rte_compiler_barrier();
-
- /* If ll == NULL then this is the first device. */
- if (ll) {
- /* Increment to the tail of the linked list. */
- while ((ll->next != NULL) )
- ll = ll->next;
-
- ll->next = ll_dev;
- } else {
- *ll_root_addr = ll_dev;
- }
-}
-
-/*
- * Remove an entry from a used linked list. The entry must then be added to
- * the free linked list using put_data_ll_free_entry().
- */
-static void
-rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr,
- struct virtio_net_data_ll *ll_dev,
- struct virtio_net_data_ll *ll_dev_last)
-{
- struct virtio_net_data_ll *ll = *ll_root_addr;
-
- if (unlikely((ll == NULL) || (ll_dev == NULL)))
- return;
-
- if (ll_dev == ll)
- *ll_root_addr = ll_dev->next;
- else
- if (likely(ll_dev_last != NULL))
- ll_dev_last->next = ll_dev->next;
- else
- RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from ll failed.\n");
-}
-
-/*
- * Find and return an entry from the free linked list.
- */
-static struct virtio_net_data_ll *
-get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
-{
- struct virtio_net_data_ll *ll_free = *ll_root_addr;
- struct virtio_net_data_ll *ll_dev;
-
- if (ll_free == NULL)
- return NULL;
-
- ll_dev = ll_free;
- *ll_root_addr = ll_free->next;
-
- return ll_dev;
-}
-
-/*
- * Place an entry back on to the free linked list.
- */
-static void
-put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr,
- struct virtio_net_data_ll *ll_dev)
-{
- struct virtio_net_data_ll *ll_free = *ll_root_addr;
-
- if (ll_dev == NULL)
- return;
-
- ll_dev->next = ll_free;
- *ll_root_addr = ll_dev;
-}
-
/*
- * Creates a linked list of a given size.
- */
-static struct virtio_net_data_ll *
-alloc_data_ll(uint32_t size)
-{
- struct virtio_net_data_ll *ll_new;
- uint32_t i;
-
- /* Malloc and then chain the linked list. */
- ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
- if (ll_new == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
- return NULL;
- }
-
- for (i = 0; i < size - 1; i++) {
- ll_new[i].vdev = NULL;
- ll_new[i].next = &ll_new[i+1];
- }
- ll_new[i].next = NULL;
-
- return ll_new;
-}
-
-/*
- * Create the main linked list along with each individual cores linked list. A used and a free list
- * are created to manage entries.
- */
-static int
-init_data_ll (void)
-{
- int lcore;
-
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
- if (lcore_info[lcore].lcore_ll == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
- return -1;
- }
-
- lcore_info[lcore].lcore_ll->device_num = 0;
- lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
- lcore_info[lcore].lcore_ll->ll_root_used = NULL;
- if (num_devices % num_switching_cores)
- lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
- else
- lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
- }
-
- /* Allocate devices up to a maximum of MAX_DEVICES. */
- ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));
-
- return 0;
-}
-
-/*
- * Remove a device from the specific data core linked list and from the main linked list. Synchronization
- * occurs through the use of the lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
+ * Remove a device from the specific data core linked list and from the
+ * main linked list. Synchronization occurs through the use of the
+ * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
* of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
*/
static void
-destroy_device (volatile struct virtio_net *dev)
+destroy_device(int vid)
{
- struct virtio_net_data_ll *ll_lcore_dev_cur;
- struct virtio_net_data_ll *ll_main_dev_cur;
- struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
- struct virtio_net_data_ll *ll_main_dev_last = NULL;
- struct vhost_dev *vdev;
+ struct vhost_dev *vdev = NULL;
int lcore;
- dev->flags &= ~VIRTIO_DEV_RUNNING;
-
- vdev = (struct vhost_dev *)dev->priv;
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ if (vdev->vid == vid)
+ break;
+ }
+ if (!vdev)
+ return;
/*set the remove flag. */
vdev->remove = 1;
while(vdev->ready != DEVICE_SAFE_REMOVE) {
rte_pause();
}
- /* Search for entry to be removed from lcore ll */
- ll_lcore_dev_cur = lcore_info[vdev->coreid].lcore_ll->ll_root_used;
- while (ll_lcore_dev_cur != NULL) {
- if (ll_lcore_dev_cur->vdev == vdev) {
- break;
- } else {
- ll_lcore_dev_last = ll_lcore_dev_cur;
- ll_lcore_dev_cur = ll_lcore_dev_cur->next;
- }
- }
-
- if (ll_lcore_dev_cur == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find the dev to be destroy.\n",
- dev->device_fh);
- return;
- }
+ TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
+ lcore_vdev_entry);
+ TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
- /* Search for entry to be removed from main ll */
- ll_main_dev_cur = ll_root_used;
- ll_main_dev_last = NULL;
- while (ll_main_dev_cur != NULL) {
- if (ll_main_dev_cur->vdev == vdev) {
- break;
- } else {
- ll_main_dev_last = ll_main_dev_cur;
- ll_main_dev_cur = ll_main_dev_cur->next;
- }
- }
-
- /* Remove entries from the lcore and main ll. */
- rm_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
- rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
/* Set the dev_removal_flag on each lcore. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
- }
+ RTE_LCORE_FOREACH_SLAVE(lcore)
+ lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
/*
- * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
- * they can no longer access the device removed from the linked lists and that the devices
- * are no longer in use.
+ * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
+ * we can be sure that they can no longer access the device removed
+ * from the linked lists and that the devices are no longer in use.
*/
RTE_LCORE_FOREACH_SLAVE(lcore) {
- while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
+ while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
rte_pause();
- }
}
- /* Add the entries back to the lcore and main free ll.*/
- put_data_ll_free_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
- put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
+ lcore_info[vdev->coreid].device_num--;
- /* Decrement number of device on the lcore. */
- lcore_info[vdev->coreid].lcore_ll->device_num--;
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) device has been removed from data core\n",
+ vdev->vid);
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
-
- if (zero_copy) {
- struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
-
- /* Stop the RX queue. */
- if (rte_eth_dev_rx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In destroy_device: Failed to stop "
- "rx queue:%d\n",
- dev->device_fh,
- vdev->vmdq_rx_q);
- }
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") in destroy_device: Start put mbuf in "
- "mempool back to ring for RX queue: %d\n",
- dev->device_fh, vdev->vmdq_rx_q);
-
- mbuf_destroy_zcp(vpool);
-
- /* Stop the TX queue. */
- if (rte_eth_dev_tx_queue_stop(ports[0], vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In destroy_device: Failed to "
- "stop tx queue:%d\n",
- dev->device_fh, vdev->vmdq_rx_q);
- }
-
- vpool = &vpool_array[vdev->vmdq_rx_q + MAX_QUEUES];
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") destroy_device: Start put mbuf in mempool "
- "back to ring for TX queue: %d, dev:(%"PRIu64")\n",
- dev->device_fh, (vdev->vmdq_rx_q + MAX_QUEUES),
- dev->device_fh);
-
- mbuf_destroy_zcp(vpool);
- rte_free(vdev->regions_hpa);
- }
rte_free(vdev);
-
-}
-
-/*
- * Calculate the number of physically contiguous regions within one
- * particular region whose vhost virtual address is contiguous. The
- * region starts from vva_start, with a size of 'size' bytes.
- */
-static uint32_t
-check_hpa_regions(uint64_t vva_start, uint64_t size)
-{
- uint32_t i, nregions = 0, page_size = getpagesize();
- uint64_t cur_phys_addr = 0, next_phys_addr = 0;
- if (vva_start % page_size) {
- LOG_DEBUG(VHOST_CONFIG,
- "in check_countinous: vva start(%p) mod page_size(%d) "
- "has remainder\n",
- (void *)(uintptr_t)vva_start, page_size);
- return 0;
- }
- if (size % page_size) {
- LOG_DEBUG(VHOST_CONFIG,
- "in check_countinous: "
- "size((%"PRIu64")) mod page_size(%d) has remainder\n",
- size, page_size);
- return 0;
- }
- for (i = 0; i < size - page_size; i = i + page_size) {
- cur_phys_addr
- = rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
- next_phys_addr = rte_mem_virt2phy(
- (void *)(uintptr_t)(vva_start + i + page_size));
- if ((cur_phys_addr + page_size) != next_phys_addr) {
- ++nregions;
- LOG_DEBUG(VHOST_CONFIG,
- "in check_continuous: hva addr:(%p) is not "
- "continuous with hva addr:(%p), diff:%d\n",
- (void *)(uintptr_t)(vva_start + (uint64_t)i),
- (void *)(uintptr_t)(vva_start + (uint64_t)i
- + page_size), page_size);
- LOG_DEBUG(VHOST_CONFIG,
- "in check_continuous: hpa addr:(%p) is not "
- "continuous with hpa addr:(%p), "
- "diff:(%"PRIu64")\n",
- (void *)(uintptr_t)cur_phys_addr,
- (void *)(uintptr_t)next_phys_addr,
- (next_phys_addr-cur_phys_addr));
- }
- }
- return nregions;
-}
-
-/*
- * Divide each region whose vhost virtual address is contiguous into a few
- * sub-regions, making sure the physical addresses within each sub-region
- * are contiguous. Then fill the offset (to GPA), size, etc. of each
- * sub-region into regions_hpa.
- */
-static uint32_t
-fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct virtio_memory *virtio_memory)
-{
- uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = getpagesize();
- uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;
-
- if (mem_region_hpa == NULL)
- return 0;
-
- for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {
- vva_start = virtio_memory->regions[regionidx].guest_phys_address +
- virtio_memory->regions[regionidx].address_offset;
- mem_region_hpa[regionidx_hpa].guest_phys_address
- = virtio_memory->regions[regionidx].guest_phys_address;
- mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
- rte_mem_virt2phy((void *)(uintptr_t)(vva_start)) -
- mem_region_hpa[regionidx_hpa].guest_phys_address;
- LOG_DEBUG(VHOST_CONFIG,
- "in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].guest_phys_address));
- LOG_DEBUG(VHOST_CONFIG,
- "in fill_hpa_regions: host phys addr start[%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
- for (i = 0, k = 0;
- i < virtio_memory->regions[regionidx].memory_size -
- page_size;
- i += page_size) {
- cur_phys_addr = rte_mem_virt2phy(
- (void *)(uintptr_t)(vva_start + i));
- next_phys_addr = rte_mem_virt2phy(
- (void *)(uintptr_t)(vva_start +
- i + page_size));
- if ((cur_phys_addr + page_size) != next_phys_addr) {
- mem_region_hpa[regionidx_hpa].guest_phys_address_end =
- mem_region_hpa[regionidx_hpa].guest_phys_address +
- k + page_size;
- mem_region_hpa[regionidx_hpa].memory_size
- = k + page_size;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
- "phys addr end [%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].guest_phys_address_end));
- LOG_DEBUG(VHOST_CONFIG,
- "in fill_hpa_regions: guest phys addr "
- "size [%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].memory_size));
- mem_region_hpa[regionidx_hpa + 1].guest_phys_address
- = mem_region_hpa[regionidx_hpa].guest_phys_address_end;
- ++regionidx_hpa;
- mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
- next_phys_addr -
- mem_region_hpa[regionidx_hpa].guest_phys_address;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
- " phys addr start[%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].guest_phys_address));
- LOG_DEBUG(VHOST_CONFIG,
- "in fill_hpa_regions: host phys addr "
- "start[%d]:(%p)\n",
- regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
- k = 0;
- } else {
- k += page_size;
- }
- }
- mem_region_hpa[regionidx_hpa].guest_phys_address_end
- = mem_region_hpa[regionidx_hpa].guest_phys_address
- + k + page_size;
- mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
- "[%d]:(%p)\n", regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].guest_phys_address_end));
- LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
- "[%d]:(%p)\n", regionidx_hpa,
- (void *)(uintptr_t)
- (mem_region_hpa[regionidx_hpa].memory_size));
- ++regionidx_hpa;
- }
- return regionidx_hpa;
}
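
The removal handshake spans two hunks: destroy_device() raises REQUEST_DEV_REMOVAL and the poll loop in switch_worker() answers with ACK_DEV_REMOVAL. Restated purely for illustration as a standalone helper (the flags and lcore_info come from main.h below), the configuration-core side amounts to:

	static void
	wait_for_data_cores(void)
	{
		int lcore;

		/* Ask every worker core to confirm it has left its device list. */
		RTE_LCORE_FOREACH_SLAVE(lcore)
			lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

		/* Each worker flips the flag back once per poll-loop iteration. */
		RTE_LCORE_FOREACH_SLAVE(lcore) {
			while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
				rte_pause();
		}
	}
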
/*
@@ -2654,148 +1224,23 @@ fill_hpa_memory_regions(struct virtio_memory_regions_hpa *mem_region_hpa, struct
* and the allocated to a specific data core.
*/
static int
-new_device (struct virtio_net *dev)
+new_device(int vid)
{
- struct virtio_net_data_ll *ll_dev;
int lcore, core_add = 0;
uint32_t device_num_min = num_devices;
struct vhost_dev *vdev;
- uint32_t regionidx;
vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
- dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) couldn't allocate memory for vhost dev\n",
+ vid);
return -1;
}
- vdev->dev = dev;
- dev->priv = vdev;
-
- if (zero_copy) {
- vdev->nregions_hpa = dev->mem->nregions;
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- vdev->nregions_hpa
- += check_hpa_regions(
- dev->mem->regions[regionidx].guest_phys_address
- + dev->mem->regions[regionidx].address_offset,
- dev->mem->regions[regionidx].memory_size);
+ vdev->vid = vid;
- }
-
- vdev->regions_hpa = rte_calloc("vhost hpa region",
- vdev->nregions_hpa,
- sizeof(struct virtio_memory_regions_hpa),
- RTE_CACHE_LINE_SIZE);
- if (vdev->regions_hpa == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n");
- rte_free(vdev);
- return -1;
- }
-
-
- if (fill_hpa_memory_regions(
- vdev->regions_hpa, dev->mem
- ) != vdev->nregions_hpa) {
-
- RTE_LOG(ERR, VHOST_CONFIG,
- "hpa memory regions number mismatch: "
- "[%d]\n", vdev->nregions_hpa);
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
- }
-
-
- /* Add device to main ll */
- ll_dev = get_data_ll_free_entry(&ll_root_free);
- if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
- "of %d devices per core has been reached\n",
- dev->device_fh, num_devices);
- if (vdev->regions_hpa)
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
- ll_dev->vdev = vdev;
- add_data_ll_entry(&ll_root_used, ll_dev);
- vdev->vmdq_rx_q
- = dev->device_fh * queues_per_pool + vmdq_queue_base;
-
- if (zero_copy) {
- uint32_t index = vdev->vmdq_rx_q;
- uint32_t count_in_ring, i;
- struct mbuf_table *tx_q;
-
- count_in_ring = rte_ring_count(vpool_array[index].ring);
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") in new_device: mbuf count in mempool "
- "before attach is: %d\n",
- dev->device_fh,
- rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") in new_device: mbuf count in ring "
- "before attach is : %d\n",
- dev->device_fh, count_in_ring);
-
- /*
- * Attach all mbufs in vpool.ring and put them back into vpool.pool.
- */
- for (i = 0; i < count_in_ring; i++)
- attach_rxmbuf_zcp(dev);
-
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
- "mempool after attach is: %d\n",
- dev->device_fh,
- rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
- "ring after attach is : %d\n",
- dev->device_fh,
- rte_ring_count(vpool_array[index].ring));
-
- tx_q = &tx_queue_zcp[(uint16_t)vdev->vmdq_rx_q];
- tx_q->txq_id = vdev->vmdq_rx_q;
-
- if (rte_eth_dev_tx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
- struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In new_device: Failed to start "
- "tx queue:%d\n",
- dev->device_fh, vdev->vmdq_rx_q);
-
- mbuf_destroy_zcp(vpool);
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
-
- if (rte_eth_dev_rx_queue_start(ports[0], vdev->vmdq_rx_q) != 0) {
- struct vpool *vpool = &vpool_array[vdev->vmdq_rx_q];
-
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In new_device: Failed to start "
- "rx queue:%d\n",
- dev->device_fh, vdev->vmdq_rx_q);
-
- /* Stop the TX queue. */
- if (rte_eth_dev_tx_queue_stop(ports[0],
- vdev->vmdq_rx_q) != 0) {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") In new_device: Failed to "
- "stop tx queue:%d\n",
- dev->device_fh, vdev->vmdq_rx_q);
- }
-
- mbuf_destroy_zcp(vpool);
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
-
- }
+ TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
+ vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
/*reset ready flag*/
vdev->ready = DEVICE_MAC_LEARNING;
@@ -2803,36 +1248,24 @@ new_device (struct virtio_net *dev)
/* Find a suitable lcore to add the device. */
RTE_LCORE_FOREACH_SLAVE(lcore) {
- if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
- device_num_min = lcore_info[lcore].lcore_ll->device_num;
+ if (lcore_info[lcore].device_num < device_num_min) {
+ device_num_min = lcore_info[lcore].device_num;
core_add = lcore;
}
}
- /* Add device to lcore ll */
- ll_dev = get_data_ll_free_entry(&lcore_info[core_add].lcore_ll->ll_root_free);
- if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
- vdev->ready = DEVICE_SAFE_REMOVE;
- destroy_device(dev);
- rte_free(vdev->regions_hpa);
- rte_free(vdev);
- return -1;
- }
- ll_dev->vdev = vdev;
vdev->coreid = core_add;
- add_data_ll_entry(&lcore_info[vdev->coreid].lcore_ll->ll_root_used, ll_dev);
-
- /* Initialize device stats */
- memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
+ TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
+ lcore_vdev_entry);
+ lcore_info[vdev->coreid].device_num++;
/* Disable notifications. */
- rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
- rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
- lcore_info[vdev->coreid].lcore_ll->device_num++;
- dev->flags |= VIRTIO_DEV_RUNNING;
+ rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
+ rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, vdev->coreid);
+ RTE_LOG(INFO, VHOST_DATA,
+ "(%d) device has been added to data core %d\n",
+ vid, vdev->coreid);
return 0;
}
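
new_device() and destroy_device() are invoked by the vhost library through the ops table named in the hunk header below. A sketch of that wiring, with the registration call assumed from this release's API:

	static const struct virtio_net_device_ops virtio_net_device_ops = {
		.new_device     = new_device,
		.destroy_device = destroy_device,
	};

	/* Registered once in main(), after rte_vhost_driver_register():
	 *
	 *	rte_vhost_driver_callback_register(&virtio_net_device_ops);
	 */
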
@@ -2854,10 +1287,9 @@ static const struct virtio_net_device_ops virtio_net_device_ops =
static void
print_stats(void)
{
- struct virtio_net_data_ll *dev_ll;
+ struct vhost_dev *vdev;
uint64_t tx_dropped, rx_dropped;
uint64_t tx, tx_total, rx, rx_total;
- uint32_t device_fh;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
@@ -2865,77 +1297,32 @@ print_stats(void)
sleep(enable_stats);
/* Clear screen and move to top left */
- printf("%s%s", clr, top_left);
-
- printf("\nDevice statistics ====================================");
+ printf("%s%s\n", clr, top_left);
+ printf("Device statistics =================================\n");
- dev_ll = ll_root_used;
- while (dev_ll != NULL) {
- device_fh = (uint32_t)dev_ll->vdev->dev->device_fh;
- tx_total = dev_statistics[device_fh].tx_total;
- tx = dev_statistics[device_fh].tx;
+ TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+ tx_total = vdev->stats.tx_total;
+ tx = vdev->stats.tx;
tx_dropped = tx_total - tx;
- if (zero_copy == 0) {
- rx_total = rte_atomic64_read(
- &dev_statistics[device_fh].rx_total_atomic);
- rx = rte_atomic64_read(
- &dev_statistics[device_fh].rx_atomic);
- } else {
- rx_total = dev_statistics[device_fh].rx_total;
- rx = dev_statistics[device_fh].rx;
- }
+
+ rx_total = rte_atomic64_read(&vdev->stats.rx_total_atomic);
+ rx = rte_atomic64_read(&vdev->stats.rx_atomic);
rx_dropped = rx_total - rx;
- printf("\nStatistics for device %"PRIu32" ------------------------------"
- "\nTX total: %"PRIu64""
- "\nTX dropped: %"PRIu64""
- "\nTX successful: %"PRIu64""
- "\nRX total: %"PRIu64""
- "\nRX dropped: %"PRIu64""
- "\nRX successful: %"PRIu64"",
- device_fh,
- tx_total,
- tx_dropped,
- tx,
- rx_total,
- rx_dropped,
- rx);
-
- dev_ll = dev_ll->next;
+ printf("Statistics for device %d\n"
+ "-----------------------\n"
+ "TX total: %" PRIu64 "\n"
+ "TX dropped: %" PRIu64 "\n"
+ "TX successful: %" PRIu64 "\n"
+ "RX total: %" PRIu64 "\n"
+ "RX dropped: %" PRIu64 "\n"
+ "RX successful: %" PRIu64 "\n",
+ vdev->vid,
+ tx_total, tx_dropped, tx,
+ rx_total, rx_dropped, rx);
}
- printf("\n======================================================\n");
- }
-}
-static void
-setup_mempool_tbl(int socket, uint32_t index, char *pool_name,
- char *ring_name, uint32_t nb_mbuf)
-{
- vpool_array[index].pool = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
- MBUF_CACHE_SIZE_ZCP, 0, MBUF_DATA_SIZE_ZCP, socket);
- if (vpool_array[index].pool != NULL) {
- vpool_array[index].ring
- = rte_ring_create(ring_name,
- rte_align32pow2(nb_mbuf + 1),
- socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
- if (likely(vpool_array[index].ring != NULL)) {
- LOG_DEBUG(VHOST_CONFIG,
- "in setup_mempool_tbl: mbuf count in "
- "mempool is: %d\n",
- rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(VHOST_CONFIG,
- "in setup_mempool_tbl: mbuf count in "
- "ring is: %d\n",
- rte_ring_count(vpool_array[index].ring));
- } else {
- rte_exit(EXIT_FAILURE, "ring_create(%s) failed",
- ring_name);
- }
-
- /* Need consider head room. */
- vpool_array[index].buf_size = VIRTIO_DESCRIPTOR_LEN_ZCP;
- } else {
- rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", pool_name);
+ printf("===================================================\n");
}
}
@@ -2951,20 +1338,70 @@ sigint_handler(__rte_unused int signum)
}
/*
+ * While creating an mbuf pool, one key thing is to figure out how
+ * many mbuf entries are enough for our use. FYI, here are some
+ * guidelines:
+ *
+ * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage.
+ *
+ * - For each switch core (a CPU core that does the packet switching),
+ * we also need to make some reservation for receiving the packets
+ * from the virtio Tx queue. How many are enough depends on the
+ * usage. It's normally a simple calculation like the following:
+ *
+ * MAX_PKT_BURST * max packet size / mbuf size
+ *
+ * So, we definitely need to allocate more mbufs when TSO is enabled.
+ *
+ * - Similarly, for each switch core, we should reserve @nr_rx_desc
+ * mbufs for receiving the packets from the physical NIC device.
+ *
+ * - We also need to make sure, for each switch core, that we have
+ * allocated enough mbufs to fill up the mbuf cache.
+ */
+static void
+create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
+ uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
+{
+ uint32_t nr_mbufs;
+ uint32_t nr_mbufs_per_core;
+ uint32_t mtu = 1500;
+
+ if (mergeable)
+ mtu = 9000;
+ if (enable_tso)
+ mtu = 64 * 1024;
+
+ nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
+ (mbuf_size - RTE_PKTMBUF_HEADROOM);
+ nr_mbufs_per_core += nr_rx_desc;
+ nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
+
+ nr_mbufs = nr_queues * nr_rx_desc;
+ nr_mbufs += nr_mbufs_per_core * nr_switch_core;
+ nr_mbufs *= nr_port;
+
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
+ nr_mbuf_cache, 0, mbuf_size,
+ rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+}
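
To make the sizing guideline concrete, here is an illustrative calculation with assumed values; an mbuf data size of 2176 bytes and 128 bytes of headroom are common defaults, not constants quoted from this file:

	/*
	 * mtu = 1500, mbuf_size = 2176, MAX_PKT_BURST = 32,
	 * nr_rx_desc = 128, nr_mbuf_cache = 128:
	 *
	 *   nr_mbufs_per_core = (1500 + 2176) * 32 / (2176 - 128) = 57
	 *   nr_mbufs_per_core += 128                              -> 185
	 *   RTE_MAX(185, 128)                                     -> 185
	 *
	 * So each switch core adds 185 mbufs on top of the per-queue
	 * nr_queues * nr_rx_desc reservation, all scaled by nr_port.
	 */
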
+
+/*
* Main function, does initialisation and calls the per-lcore functions. The CUSE
* device is also registered here to handle the IOCTLs.
*/
int
main(int argc, char *argv[])
{
- struct rte_mempool *mbuf_pool = NULL;
unsigned lcore_id, core_id = 0;
unsigned nb_ports, valid_num_ports;
int ret;
uint8_t portid;
- uint16_t queue_id;
static pthread_t tid;
char thread_name[RTE_MAX_THREAD_NAME_LEN];
+ uint64_t flags = 0;
signal(SIGINT, sigint_handler);
@@ -2981,19 +1418,16 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Invalid argument\n");
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
+ TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
+
if (rte_lcore_is_enabled(lcore_id))
lcore_ids[core_id ++] = lcore_id;
if (rte_lcore_count() > RTE_MAX_LCORE)
rte_exit(EXIT_FAILURE,"Not enough cores\n");
- /* set the number of switching cores available */
- num_switching_cores = rte_lcore_count()-1;
-
/* Get the number of physical ports. */
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NUM_PORTS and global array PORTS
@@ -3007,64 +1441,21 @@ main(int argc, char *argv[])
return -1;
}
- if (zero_copy == 0) {
- /* Create the mbuf pool. */
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
- NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE,
- 0, MBUF_DATA_SIZE, rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-
- for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++)
- vpool_array[queue_id].pool = mbuf_pool;
-
- if (vm2vm_mode == VM2VM_HARDWARE) {
- /* Enable VT loop back to let L2 switch to do it. */
- vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
- LOG_DEBUG(VHOST_CONFIG,
- "Enable loop back for L2 switch in vmdq.\n");
- }
- } else {
- uint32_t nb_mbuf;
- char pool_name[RTE_MEMPOOL_NAMESIZE];
- char ring_name[RTE_MEMPOOL_NAMESIZE];
-
- nb_mbuf = num_rx_descriptor
- + num_switching_cores * MBUF_CACHE_SIZE_ZCP
- + num_switching_cores * MAX_PKT_BURST;
-
- for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {
- snprintf(pool_name, sizeof(pool_name),
- "rxmbuf_pool_%u", queue_id);
- snprintf(ring_name, sizeof(ring_name),
- "rxmbuf_ring_%u", queue_id);
- setup_mempool_tbl(rte_socket_id(), queue_id,
- pool_name, ring_name, nb_mbuf);
- }
-
- nb_mbuf = num_tx_descriptor
- + num_switching_cores * MBUF_CACHE_SIZE_ZCP
- + num_switching_cores * MAX_PKT_BURST;
-
- for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) {
- snprintf(pool_name, sizeof(pool_name),
- "txmbuf_pool_%u", queue_id);
- snprintf(ring_name, sizeof(ring_name),
- "txmbuf_ring_%u", queue_id);
- setup_mempool_tbl(rte_socket_id(),
- (queue_id + MAX_QUEUES),
- pool_name, ring_name, nb_mbuf);
- }
+ /*
+ * FIXME: here we are trying to allocate mbufs big enough for
+ * @MAX_QUEUES, but the truth is we're never going to use that
+ * many queues here. We probably should only do allocation for
+ * those queues we are going to use.
+ */
+ create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
+ MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
- if (vm2vm_mode == VM2VM_HARDWARE) {
- /* Enable VT loop back to let L2 switch to do it. */
- vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
- LOG_DEBUG(VHOST_CONFIG,
- "Enable loop back for L2 switch in vmdq.\n");
- }
+ if (vm2vm_mode == VM2VM_HARDWARE) {
+ /* Enable VT loop back to let L2 switch to do it. */
+ vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "Enable loop back for L2 switch in vmdq.\n");
}
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
@@ -3079,13 +1470,6 @@ main(int argc, char *argv[])
"Cannot initialize network ports\n");
}
- /* Initialise all linked lists. */
- if (init_data_ll() == -1)
- rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
-
- /* Initialize device stats */
- memset(&dev_statistics, 0, sizeof(dev_statistics));
-
/* Enable stats if the user option is set. */
if (enable_stats) {
ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
@@ -3097,54 +1481,22 @@ main(int argc, char *argv[])
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
ret = rte_thread_setname(tid, thread_name);
if (ret != 0)
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"Cannot set print-stats name\n");
}
/* Launch all data cores. */
- if (zero_copy == 0) {
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- rte_eal_remote_launch(switch_worker,
- mbuf_pool, lcore_id);
- }
- } else {
- uint32_t count_in_mempool, index, i;
- for (index = 0; index < 2*MAX_QUEUES; index++) {
- /* For all RX and TX queues. */
- count_in_mempool
- = rte_mempool_count(vpool_array[index].pool);
-
- /*
- * Transfer all un-attached mbufs from vpool.pool
- * to vpool.ring.
- */
- for (i = 0; i < count_in_mempool; i++) {
- struct rte_mbuf *mbuf
- = __rte_mbuf_raw_alloc(
- vpool_array[index].pool);
- rte_ring_sp_enqueue(vpool_array[index].ring,
- (void *)mbuf);
- }
-
- LOG_DEBUG(VHOST_CONFIG,
- "in main: mbuf count in mempool at initial "
- "is: %d\n", count_in_mempool);
- LOG_DEBUG(VHOST_CONFIG,
- "in main: mbuf count in ring at initial is :"
- " %d\n",
- rte_ring_count(vpool_array[index].ring));
- }
-
- RTE_LCORE_FOREACH_SLAVE(lcore_id)
- rte_eal_remote_launch(switch_worker_zcp, NULL,
- lcore_id);
- }
+ RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ rte_eal_remote_launch(switch_worker, NULL, lcore_id);
if (mergeable == 0)
rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
+ if (client_mode)
+ flags |= RTE_VHOST_USER_CLIENT;
+
/* Register vhost(cuse or user) driver to handle vhost messages. */
- ret = rte_vhost_driver_register((char *)&dev_basename);
+ ret = rte_vhost_driver_register(dev_basename, flags);
if (ret != 0)
rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index d04e2be2..6bb42e89 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,48 +34,23 @@
#ifndef _MAIN_H_
#define _MAIN_H_
-//#define DEBUG
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) do { \
- RTE_LOG(DEBUG, log_type, fmt, ##args); \
-} while (0)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do{} while(0)
-#endif
+#include <sys/queue.h>
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
-/**
- * Information relating to memory regions including offsets to
- * addresses in host physical space.
- */
-struct virtio_memory_regions_hpa {
- /**< Base guest physical address of region. */
- uint64_t guest_phys_address;
- /**< End guest physical address of region. */
- uint64_t guest_phys_address_end;
- /**< Size of region. */
- uint64_t memory_size;
- /**< Offset of region for gpa to hpa translation. */
- uint64_t host_phys_addr_offset;
+struct device_statistics {
+ uint64_t tx;
+ uint64_t tx_total;
+ rte_atomic64_t rx_atomic;
+ rte_atomic64_t rx_total_atomic;
};
-/*
- * Device linked list structure for data path.
- */
struct vhost_dev {
- /**< Pointer to device created by vhost lib. */
- struct virtio_net *dev;
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
- /**< Memory region information for gpa to hpa translation. */
- struct virtio_memory_regions_hpa *regions_hpa;
/**< Device MAC address (Obtained on first TX packet). */
struct ether_addr mac_address;
/**< RX VMDQ queue number. */
@@ -88,28 +63,29 @@ struct vhost_dev {
volatile uint8_t ready;
/**< Device is marked for removal from the data core. */
volatile uint8_t remove;
+
+ int vid;
+ struct device_statistics stats;
+ TAILQ_ENTRY(vhost_dev) global_vdev_entry;
+ TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;
} __rte_cache_aligned;
-struct virtio_net_data_ll
-{
- struct vhost_dev *vdev; /* Pointer to device created by configuration core. */
- struct virtio_net_data_ll *next; /* Pointer to next device in linked list. */
-};
+TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
+
+
+#define REQUEST_DEV_REMOVAL 1
+#define ACK_DEV_REMOVAL 0
/*
* Structure containing data core specific information.
*/
-struct lcore_ll_info
-{
- struct virtio_net_data_ll *ll_root_free; /* Pointer to head in free linked list. */
- struct virtio_net_data_ll *ll_root_used; /* Pointer to head of used linked list. */
- uint32_t device_num; /* Number of devices on lcore. */
- volatile uint8_t dev_removal_flag; /* Flag to synchronize device removal. */
-};
+struct lcore_info {
+ uint32_t device_num;
+
+ /* Flag to synchronize device removal. */
+ volatile uint8_t dev_removal_flag;
-struct lcore_info
-{
- struct lcore_ll_info *lcore_ll; /* Pointer to data core specific lcore_ll_info struct */
+ struct vhost_dev_tailq_list vdev_list;
};
#endif /* _MAIN_H_ */
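
The two TAILQ_ENTRY hooks let one vhost_dev sit on the global device list and on one lcore's list at the same time. A minimal sketch of that double linkage using the struct names above (the helper itself is hypothetical):

	static struct vhost_dev_tailq_list vhost_dev_list =
		TAILQ_HEAD_INITIALIZER(vhost_dev_list);

	static void
	track_device(struct lcore_info *lcore, struct vhost_dev *vdev)
	{
		/* One element, two independent lists, one entry field each. */
		TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
		TAILQ_INSERT_TAIL(&lcore->vdev_list, vdev, lcore_vdev_entry);
		lcore->device_num++;
	}
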
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index d83138d6..2e403576 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -507,32 +507,6 @@ static unsigned check_ports_num(unsigned nb_ports)
}
/*
- * Macro to print out packet contents. Wrapped in debug define so that the
- * data path is not effected when debug is disabled.
- */
-#ifdef DEBUG
-#define PRINT_PACKET(device, addr, size, header) do { \
- char *pkt_addr = (char*)(addr); \
- unsigned int index; \
- char packet[MAX_PRINT_BUFF]; \
- \
- if ((header)) \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Header size %d: ", (device->device_fh), (size)); \
- else \
- snprintf(packet, MAX_PRINT_BUFF, "(%"PRIu64") Packet size %d: ", (device->device_fh), (size)); \
- for (index = 0; index < (size); index++) { \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), \
- "%02hhx ", pkt_addr[index]); \
- } \
- snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
- \
- LOG_DEBUG(VHOST_DATA, "%s", packet); \
-} while(0)
-#else
-#define PRINT_PACKET(device, addr, size, header) do{} while(0)
-#endif
-
-/*
* Function to convert guest physical addresses to vhost virtual addresses. This
* is used to convert virtio buffer addresses.
*/
@@ -551,7 +525,7 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
break;
}
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
return vhost_va;
@@ -581,7 +555,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
uint8_t success = 0;
void *userdata;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
vq = dev->virtqueue_rx;
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/* As many data cores may want access to available buffers, they need to be reserved. */
@@ -606,7 +580,8 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
+ dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
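
The reservation loop whose tail is visible above is elided by the hunk. Its usual shape in this code base, assuming rte_atomic16_cmpset() and a last_used_idx_res field on the virtqueue, is a CAS claim over a window of avail-ring entries:

	static inline uint16_t
	reserve_avail_entries(struct vhost_virtqueue *vq, uint16_t count)
	{
		uint16_t res_base_idx, res_end_idx, avail_idx, free_entries;
		int success;

		do {
			res_base_idx = vq->last_used_idx_res;
			avail_idx = *((volatile uint16_t *)&vq->avail->idx);

			free_entries = avail_idx - res_base_idx;
			if (count > free_entries)
				count = free_entries;
			if (count == 0)
				return 0;

			res_end_idx = res_base_idx + count;
			/* Claim [res_base_idx, res_end_idx) in one atomic step;
			 * other cores racing here retry with a fresh base. */
			success = rte_atomic16_cmpset(&vq->last_used_idx_res,
						      res_base_idx, res_end_idx);
		} while (unlikely(success == 0));

		return count;
	}
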
@@ -800,17 +775,22 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
/* Drop the packet if the TX packet is destined for the TX device. */
if (dev_ll->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
- dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "Source and destination MAC addresses are the same. "
+ "Dropping packet.\n",
+ dev_ll->dev->device_fh);
return 0;
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "MAC address is local\n", dev_ll->dev->device_fh);
if (dev_ll->dev->remove) {
/*drop the packet if the device is marked for removal*/
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
+ "Device is marked for removal\n",
+ dev_ll->dev->device_fh);
} else {
/*send the packet to the local virtio device*/
ret = virtio_dev_rx(dev_ll->dev, &m, 1);
@@ -849,7 +829,8 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
return;
}
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ "MAC address is external\n", dev->device_fh);
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
@@ -922,7 +903,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
if (vq->last_used_idx == avail_idx)
return;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
+ dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
@@ -931,7 +913,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
free_entries = avail_idx - vq->last_used_idx;
free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
+ RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
+ dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
@@ -1020,7 +1003,9 @@ switch_worker(__attribute__((unused)) void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
+ RTE_LOG(DEBUG, VHOST_DATA,
+ "TX queue drained after timeout with burst size %u\n",
+ tx_q->len);
/*Tx any packets in the queue*/
ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
@@ -1460,8 +1445,6 @@ main(int argc, char *argv[])
/* Get the number of physical ports. */
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NUM_PORTS and global array PORTS
@@ -1482,9 +1465,6 @@ main(int argc, char *argv[])
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
- /* Set log level. */
- rte_set_log_level(LOG_LEVEL);
-
/* initialize all ports */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
@@ -1514,7 +1494,7 @@ main(int argc, char *argv[])
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-xen-stats");
ret = rte_thread_setname(tid, thread_name);
if (ret != 0)
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(DEBUG, VHOST_CONFIG,
"Cannot set print-stats name\n");
}
diff --git a/examples/vhost_xen/main.h b/examples/vhost_xen/main.h
index 481572e6..5ff48fd9 100644
--- a/examples/vhost_xen/main.h
+++ b/examples/vhost_xen/main.h
@@ -34,17 +34,6 @@
#ifndef _MAIN_H_
#define _MAIN_H_
-//#define DEBUG
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) \
- RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do{} while(0)
-#endif
-
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 22c2ddd5..e068ae28 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -667,6 +667,7 @@ add_vm(const char *vm_name)
return -1;
}
strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
+ new_domain->name[sizeof(new_domain->name) - 1] = '\0';
new_domain->channel_mask = 0;
new_domain->num_channels = 0;
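
The added line guards against strncpy()'s well-known edge case: when the source fills the destination exactly, no terminator is written. The idiom in isolation, with a helper name invented for illustration:

	#include <string.h>

	static void
	copy_bounded(char *dst, size_t dst_len, const char *src)
	{
		/* strncpy() leaves dst unterminated if src fills it entirely,
		 * so the terminator is forced, exactly as the fix above does. */
		strncpy(dst, src, dst_len);
		dst[dst_len - 1] = '\0';
	}
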
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 178af2f5..360492ba 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -599,8 +599,6 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Not enough cores\n");
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NUM_PORTS and global array PORTS
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index 62e1422a..617263b4 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -662,8 +662,6 @@ main(int argc, char *argv[])
}
nb_ports = rte_eth_dev_count();
- if (nb_ports > RTE_MAX_ETHPORTS)
- nb_ports = RTE_MAX_ETHPORTS;
/*
* Update the global var NUM_PORTS and global array PORTS
diff --git a/lib/Makefile b/lib/Makefile
index f254dba8..ca7c02fd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -57,6 +57,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PORT) += librte_port
DIRS-$(CONFIG_RTE_LIBRTE_TABLE) += librte_table
DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += librte_pipeline
DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += librte_reorder
+DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += librte_pdump
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
DIRS-$(CONFIG_RTE_LIBRTE_KNI) += librte_kni
diff --git a/lib/librte_acl/Makefile b/lib/librte_acl/Makefile
index 2e394c97..9803e9dd 100644
--- a/lib/librte_acl/Makefile
+++ b/lib/librte_acl/Makefile
@@ -73,7 +73,7 @@ else
$(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \
grep -q AVX2 && echo 1)
ifeq ($(CC_AVX2_SUPPORT), 1)
- ifeq ($(CC), icc)
+ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
CFLAGS_acl_run_avx2.o += -march=core-avx2
else
CFLAGS_acl_run_avx2.o += -mavx2
diff --git a/lib/librte_cfgfile/rte_cfgfile.c b/lib/librte_cfgfile/rte_cfgfile.c
index 75625a28..d72052a0 100644
--- a/lib/librte_cfgfile/rte_cfgfile.c
+++ b/lib/librte_cfgfile/rte_cfgfile.c
@@ -232,6 +232,7 @@ rte_cfgfile_load(const char *filename, int flags)
return cfg;
error1:
+ cfg->num_sections = curr_section + 1;
rte_cfgfile_close(cfg);
error2:
fclose(f);
diff --git a/lib/librte_cfgfile/rte_cfgfile.h b/lib/librte_cfgfile/rte_cfgfile.h
index 834f8287..f649836c 100644
--- a/lib/librte_cfgfile/rte_cfgfile.h
+++ b/lib/librte_cfgfile/rte_cfgfile.h
@@ -72,7 +72,7 @@ struct rte_cfgfile_entry {
* @param flags
* Config file flags, Reserved for future use. Must be set to 0.
* @return
-* Handle to configuration file
+* Handle to configuration file on success, NULL otherwise
*/
struct rte_cfgfile *rte_cfgfile_load(const char *filename, int flags);
diff --git a/lib/librte_cmdline/cmdline.c b/lib/librte_cmdline/cmdline.c
index c405878e..a9c47be3 100644
--- a/lib/librte_cmdline/cmdline.c
+++ b/lib/librte_cmdline/cmdline.c
@@ -130,6 +130,7 @@ struct cmdline *
cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out)
{
struct cmdline *cl;
+ int ret;
if (!ctx || !prompt)
return NULL;
@@ -142,8 +143,13 @@ cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out)
cl->s_out = s_out;
cl->ctx = ctx;
- rdline_init(&cl->rdl, cmdline_write_char,
- cmdline_valid_buffer, cmdline_complete_buffer);
+ ret = rdline_init(&cl->rdl, cmdline_write_char, cmdline_valid_buffer,
+ cmdline_complete_buffer);
+ if (ret != 0) {
+ free(cl);
+ return NULL;
+ }
+
cl->rdl.opaque = cl;
cmdline_set_prompt(cl, prompt);
rdline_newline(&cl->rdl, cl->prompt);
diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c
index 24a6ed67..b496067a 100644
--- a/lib/librte_cmdline/cmdline_parse.c
+++ b/lib/librte_cmdline/cmdline_parse.c
@@ -118,6 +118,14 @@ cmdline_isendoftoken(char c)
return 0;
}
+int
+cmdline_isendofcommand(char c)
+{
+ if (!c || iscomment(c) || isendofline(c))
+ return 1;
+ return 0;
+}
+
static unsigned int
nb_common_chars(const char * s1, const char * s2)
{
diff --git a/lib/librte_cmdline/cmdline_parse.h b/lib/librte_cmdline/cmdline_parse.h
index 4b25c456..4ac05d6b 100644
--- a/lib/librte_cmdline/cmdline_parse.h
+++ b/lib/librte_cmdline/cmdline_parse.h
@@ -184,6 +184,9 @@ int cmdline_complete(struct cmdline *cl, const char *buf, int *state,
* isendofline(c)) */
int cmdline_isendoftoken(char c);
+/* return true if(!c || iscomment(c) || isendofline(c)) */
+int cmdline_isendofcommand(char c);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cmdline/cmdline_parse_string.c b/lib/librte_cmdline/cmdline_parse_string.c
index 45883b3e..35917a7b 100644
--- a/lib/librte_cmdline/cmdline_parse_string.c
+++ b/lib/librte_cmdline/cmdline_parse_string.c
@@ -76,9 +76,10 @@ struct cmdline_token_ops cmdline_token_string_ops = {
.get_help = cmdline_get_help_string,
};
-#define MULTISTRING_HELP "Mul-choice STRING"
-#define ANYSTRING_HELP "Any STRING"
-#define FIXEDSTRING_HELP "Fixed STRING"
+#define CHOICESTRING_HELP "Mul-choice STRING"
+#define ANYSTRING_HELP "Any STRING"
+#define ANYSTRINGS_HELP "Any STRINGS"
+#define FIXEDSTRING_HELP "Fixed STRING"
static unsigned int
get_token_len(const char *s)
@@ -123,8 +124,8 @@ cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
sd = &tk2->string_data;
- /* fixed string */
- if (sd->str) {
+ /* fixed string (known single token) */
+ if ((sd->str != NULL) && (strcmp(sd->str, TOKEN_STRING_MULTI) != 0)) {
str = sd->str;
do {
token_len = get_token_len(str);
@@ -148,7 +149,21 @@ cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
if (!str)
return -1;
}
- /* unspecified string */
+ /* multi string */
+ else if (sd->str != NULL) {
+ if (ressize < STR_MULTI_TOKEN_SIZE)
+ return -1;
+
+ token_len = 0;
+ while (!cmdline_isendofcommand(buf[token_len]) &&
+ token_len < (STR_MULTI_TOKEN_SIZE - 1))
+ token_len++;
+
+ /* return if token too long */
+ if (token_len >= (STR_MULTI_TOKEN_SIZE - 1))
+ return -1;
+ }
+ /* unspecified string (unknown single token) */
else {
token_len = 0;
while(!cmdline_isendoftoken(buf[token_len]) &&
@@ -162,12 +177,16 @@ cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
}
if (res) {
- /* we are sure that token_len is < STR_TOKEN_SIZE-1 */
- snprintf(res, STR_TOKEN_SIZE, "%s", buf);
+ if ((sd->str != NULL) && (strcmp(sd->str, TOKEN_STRING_MULTI) == 0))
+ /* we are sure that token_len is < STR_MULTI_TOKEN_SIZE-1 */
+ snprintf(res, STR_MULTI_TOKEN_SIZE, "%s", buf);
+ else
+ /* we are sure that token_len is < STR_TOKEN_SIZE-1 */
+ snprintf(res, STR_TOKEN_SIZE, "%s", buf);
+
*((char *)res + token_len) = 0;
}
-
return token_len;
}
@@ -242,8 +261,10 @@ int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf,
s = sd->str;
if (s) {
- if (get_next_token(s))
- snprintf(dstbuf, size, MULTISTRING_HELP);
+ if (strcmp(s, TOKEN_STRING_MULTI) == 0)
+ snprintf(dstbuf, size, ANYSTRINGS_HELP);
+ else if (get_next_token(s))
+ snprintf(dstbuf, size, CHOICESTRING_HELP);
else
snprintf(dstbuf, size, FIXEDSTRING_HELP);
} else
diff --git a/lib/librte_cmdline/cmdline_parse_string.h b/lib/librte_cmdline/cmdline_parse_string.h
index 94aa1f1b..a84291b0 100644
--- a/lib/librte_cmdline/cmdline_parse_string.h
+++ b/lib/librte_cmdline/cmdline_parse_string.h
@@ -70,8 +70,13 @@ extern "C" {
/* size of a parsed string */
#define STR_TOKEN_SIZE 128
+/* size of a parsed multi string */
+#define STR_MULTI_TOKEN_SIZE 4096
+
typedef char cmdline_fixed_string_t[STR_TOKEN_SIZE];
+typedef char cmdline_multi_string_t[STR_MULTI_TOKEN_SIZE];
+
struct cmdline_token_string_data {
const char *str;
};
@@ -92,6 +97,16 @@ int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx,
int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf,
unsigned int size);
+/**
+* A token marked as TOKEN_STRING_MULTI takes the entire parsed string
+* until a "#" sign appears. Everything after the "#" sign is treated
+* as a comment.
+*
+* Note:
+* In this case the second parameter of TOKEN_STRING_INITIALIZER must be of
+* type cmdline_multi_string_t.
+*/
+#define TOKEN_STRING_MULTI ""
+
#define TOKEN_STRING_INITIALIZER(structure, field, string) \
{ \
/* hdr */ \
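
For illustration, a sketch of how the new multi-string token is declared: the first token matches a fixed keyword, the second swallows the rest of the line into a cmdline_multi_string_t. The struct and token names are invented for the example:

    #include <cmdline_parse_string.h>

    struct cmd_exec_result {
        cmdline_fixed_string_t exec;   /* the literal keyword "exec" */
        cmdline_multi_string_t args;   /* everything up to '#' or end of line */
    };

    cmdline_parse_token_string_t cmd_exec_tok =
        TOKEN_STRING_INITIALIZER(struct cmd_exec_result, exec, "exec");
    cmdline_parse_token_string_t cmd_exec_args_tok =
        TOKEN_STRING_INITIALIZER(struct cmd_exec_result, args,
                                 TOKEN_STRING_MULTI);
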
diff --git a/lib/librte_cmdline/rte_cmdline_version.map b/lib/librte_cmdline/rte_cmdline_version.map
index c9fc18ab..04bcb387 100644
--- a/lib/librte_cmdline/rte_cmdline_version.map
+++ b/lib/librte_cmdline/rte_cmdline_version.map
@@ -50,7 +50,6 @@ DPDK_2.0 {
cmdline_token_num_ops;
cmdline_token_portlist_ops;
cmdline_token_string_ops;
- cmdline_token_string_ops;
cmdline_write_char;
rdline_add_history;
rdline_char_in;
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 4ae9b9e8..d9bd8210 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -388,7 +388,8 @@ struct rte_crypto_sym_op {
* this location.
*
* @note
- * For Snow3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ * For Snow3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2
+ * and KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8,
* this field should be in bits.
*/
@@ -413,6 +414,7 @@ struct rte_crypto_sym_op {
*
* @note
* For Snow3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2
+ * and KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8,
* this field should be in bits.
*/
} data; /**< Data offsets and length for ciphering */
@@ -485,6 +487,7 @@ struct rte_crypto_sym_op {
*
* @note
* For Snow3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2
+ * and KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
* this field should be in bits.
*/
@@ -504,6 +507,7 @@ struct rte_crypto_sym_op {
*
* @note
* For Snow3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2
+ * and KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
* this field should be in bits.
*/
} data; /**< Data offsets and length for authentication */
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index aa4ea425..20e5beb8 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -102,6 +102,97 @@ struct rte_cryptodev_callback {
uint32_t active; /**< Callback is executing */
};
+#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+
+static const char *cryptodev_vdev_valid_params[] = {
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+ RTE_CRYPTODEV_VDEV_SOCKET_ID
+};
+
+static uint8_t
+number_of_sockets(void)
+{
+ int sockets = 0;
+ int i;
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+
+ for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
+ if (sockets < ms[i].socket_id)
+ sockets = ms[i].socket_id;
+ }
+
+ /* Number of sockets = maximum socket_id + 1 */
+ return ++sockets;
+}
+
+/** Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ CDEV_LOG_ERR("Argument has to be positive.");
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_cryptodev_parse_vdev_init_params(struct rte_crypto_vdev_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist;
+ int ret;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ cryptodev_vdev_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+ &parse_integer_arg,
+ &params->max_nb_sessions);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ &parse_integer_arg,
+ &params->socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ if (params->socket_id >= number_of_sockets()) {
+ CDEV_LOG_ERR("Invalid socket id specified to create "
+ "the virtual crypto device on");
+ ret = -EINVAL;
+ goto free_kvlist;
+ }
+ }
+
+ return 0;
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
@@ -956,7 +1047,7 @@ rte_cryptodev_sym_session_init(struct rte_mempool *mp,
sess->mp = mp;
if (dev->dev_ops->session_initialize)
- (*dev->dev_ops->session_initialize)(mp, sess->_private);
+ (*dev->dev_ops->session_initialize)(mp, sess);
}
static int
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index d47f1e87..7768f0ae 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -59,12 +59,15 @@ extern "C" {
/**< Intel QAT Symmetric Crypto PMD device name */
#define CRYPTODEV_NAME_SNOW3G_PMD ("cryptodev_snow3g_pmd")
/**< SNOW 3G PMD device name */
+#define CRYPTODEV_NAME_KASUMI_PMD ("cryptodev_kasumi_pmd")
+/**< KASUMI PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */
RTE_CRYPTODEV_AESNI_GCM_PMD, /**< AES-NI GCM PMD */
RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */
+ RTE_CRYPTODEV_KASUMI_PMD, /**< KASUMI PMD */
RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */
RTE_CRYPTODEV_SNOW3G_PMD, /**< SNOW 3G PMD */
};
@@ -297,48 +300,6 @@ struct rte_crypto_vdev_init_params {
uint8_t socket_id;
};
-#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
-#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
-#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
-
-static const char *cryptodev_vdev_valid_params[] = {
- RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- RTE_CRYPTODEV_VDEV_SOCKET_ID
-};
-
-static inline uint8_t
-number_of_sockets(void)
-{
- int sockets = 0;
- int i;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-
- for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
- if (sockets < ms[i].socket_id)
- sockets = ms[i].socket_id;
- }
-
- /* Number of sockets = maximum socket_id + 1 */
- return ++sockets;
-}
-
-/** Parse integer from integer argument */
-static inline int
-__rte_cryptodev_parse_integer_arg(const char *key __rte_unused,
- const char *value, void *extra_args)
-{
- int *i = (int *) extra_args;
-
- *i = atoi(value);
- if (*i < 0) {
- CDEV_LOG_ERR("Argument has to be positive.");
- return -1;
- }
-
- return 0;
-}
-
/**
* Parse virtual device initialisation parameters input arguments
* @internal
@@ -350,55 +311,10 @@ __rte_cryptodev_parse_integer_arg(const char *key __rte_unused,
* 0 on successful parse
* <0 on failure to parse
*/
-static inline int
-rte_cryptodev_parse_vdev_init_params(struct rte_crypto_vdev_init_params *params,
- const char *input_args)
-{
- struct rte_kvargs *kvlist;
- int ret;
-
- if (params == NULL)
- return -EINVAL;
-
- if (input_args) {
- kvlist = rte_kvargs_parse(input_args,
- cryptodev_vdev_valid_params);
- if (kvlist == NULL)
- return -1;
-
- ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- &__rte_cryptodev_parse_integer_arg,
- &params->max_nb_queue_pairs);
- if (ret < 0)
- goto free_kvlist;
-
- ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- &__rte_cryptodev_parse_integer_arg,
- &params->max_nb_sessions);
- if (ret < 0)
- goto free_kvlist;
-
- ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
- &__rte_cryptodev_parse_integer_arg,
- &params->socket_id);
- if (ret < 0)
- goto free_kvlist;
-
- if (params->socket_id >= number_of_sockets()) {
- CDEV_LOG_ERR("Invalid socket id specified to create "
- "the virtual crypto device on");
- goto free_kvlist;
- }
- }
-
- return 0;
-
-free_kvlist:
- rte_kvargs_free(kvlist);
- return ret;
-}
+int
+rte_cryptodev_parse_vdev_init_params(
+ struct rte_crypto_vdev_init_params *params,
+ const char *input_args);
/**
* Create a virtual crypto device
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index 41004e1c..a08fd202 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -32,3 +32,10 @@ DPDK_16.04 {
local: *;
};
+
+DPDK_16.07 {
+ global:
+
+ rte_cryptodev_parse_vdev_init_params;
+
+} DPDK_16.04;
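
With the parser now exported and versioned, a virtual crypto PMD's init path can call it directly. A rough sketch; the surrounding init function, its defaults, and the error code are illustrative, not from the patch:

    #include <rte_common.h>
    #include <rte_cryptodev.h>

    static int
    my_vdev_init(const char *name __rte_unused, const char *args)
    {
        struct rte_crypto_vdev_init_params params = {
            .max_nb_queue_pairs = 8,   /* illustrative defaults */
            .max_nb_sessions = 2048,
            .socket_id = 0,
        };

        /* args e.g. "max_nb_queue_pairs=4,max_nb_sessions=1024,socket_id=1" */
        if (rte_cryptodev_parse_vdev_init_params(&params, args) < 0)
            return -1;
        /* ... create the device using the parsed params ... */
        return 0;
    }
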
diff --git a/lib/librte_eal/bsdapp/eal/Makefile b/lib/librte_eal/bsdapp/eal/Makefile
index 9054ad61..698fa0a1 100644
--- a/lib/librte_eal/bsdapp/eal/Makefile
+++ b/lib/librte_eal/bsdapp/eal/Makefile
@@ -40,8 +40,6 @@ VPATH += $(RTE_SDK)/lib/librte_eal/common/arch/$(ARCH_DIR)
CFLAGS += -I$(SRCDIR)/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
-CFLAGS += -I$(RTE_SDK)/lib/librte_ring
-CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
CFLAGS += $(WERROR_FLAGS) -O3
LDLIBS += -lexecinfo
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index 06bfd4e0..a0c8f8c8 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -605,7 +605,7 @@ rte_eal_init(int argc, char **argv)
/* Set thread_name for aid in debugging. */
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
"lcore-slave-%d", i);
- pthread_set_name_np(lcore_config[i].thread_id, thread_name);
+ rte_thread_setname(lcore_config[i].thread_id, thread_name);
}
/*
diff --git a/lib/librte_eal/bsdapp/eal/eal_debug.c b/lib/librte_eal/bsdapp/eal/eal_debug.c
index 907fbfa7..5fbc17c5 100644
--- a/lib/librte_eal/bsdapp/eal/eal_debug.c
+++ b/lib/librte_eal/bsdapp/eal/eal_debug.c
@@ -77,9 +77,6 @@ void __rte_panic(const char *funcname, const char *format, ...)
{
va_list ap;
- /* disable history */
- rte_log_set_history(0);
-
rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
va_start(ap, format);
rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
@@ -98,9 +95,6 @@ rte_exit(int exit_code, const char *format, ...)
{
va_list ap;
- /* disable history */
- rte_log_set_history(0);
-
if (exit_code != 0)
RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\n"
" Cause: ", exit_code);
diff --git a/lib/librte_eal/bsdapp/eal/eal_pci.c b/lib/librte_eal/bsdapp/eal/eal_pci.c
index 2d16d782..374b68f2 100644
--- a/lib/librte_eal/bsdapp/eal/eal_pci.c
+++ b/lib/librte_eal/bsdapp/eal/eal_pci.c
@@ -278,6 +278,11 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
/* get subsystem_device id */
dev->id.subsystem_device_id = conf->pc_subdevice;
+ /* get class id */
+ dev->id.class_id = (conf->pc_class << 16) |
+ (conf->pc_subclass << 8) |
+ (conf->pc_progif);
+
/* TODO: get max_vfs */
dev->max_vfs = 0;
@@ -422,7 +427,7 @@ int rte_eal_pci_read_config(const struct rte_pci_device *dev,
goto error;
}
- fd = open("/dev/pci", O_RDONLY);
+ fd = open("/dev/pci", O_RDWR);
if (fd < 0) {
RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
goto error;
@@ -466,7 +471,7 @@ int rte_eal_pci_write_config(const struct rte_pci_device *dev,
memcpy(&pi.pi_data, buf, len);
- fd = open("/dev/pci", O_RDONLY);
+ fd = open("/dev/pci", O_RDWR);
if (fd < 0) {
RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
goto error;
diff --git a/lib/librte_eal/bsdapp/eal/eal_thread.c b/lib/librte_eal/bsdapp/eal/eal_thread.c
index 9a034373..1b8cd8a6 100644
--- a/lib/librte_eal/bsdapp/eal/eal_thread.c
+++ b/lib/librte_eal/bsdapp/eal/eal_thread.c
@@ -199,3 +199,10 @@ int rte_sys_gettid(void)
thr_self(&lwpid);
return (int)lwpid;
}
+
+int rte_thread_setname(pthread_t id, const char *name)
+{
+ /* this BSD function returns no error */
+ pthread_set_name_np(id, name);
+ return 0;
+}
diff --git a/lib/librte_eal/bsdapp/eal/rte_eal_version.map b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
index 58c2951e..1852c4a4 100644
--- a/lib/librte_eal/bsdapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
@@ -151,3 +151,13 @@ DPDK_16.04 {
rte_eal_primary_proc_alive;
} DPDK_2.2;
+
+DPDK_16.07 {
+ global:
+
+ pci_get_sysfs_path;
+ rte_keepalive_mark_sleep;
+ rte_keepalive_register_relay_callback;
+ rte_thread_setname;
+
+} DPDK_16.04;
diff --git a/lib/librte_eal/common/eal_common_devargs.c b/lib/librte_eal/common/eal_common_devargs.c
index 2bfe54a1..e403717b 100644
--- a/lib/librte_eal/common/eal_common_devargs.c
+++ b/lib/librte_eal/common/eal_common_devargs.c
@@ -58,7 +58,7 @@ rte_eal_parse_devargs_str(const char *devargs_str,
return -1;
*drvname = strdup(devargs_str);
- if (drvname == NULL)
+ if (*drvname == NULL)
return -1;
/* set the first ',' to '\0' to split name and arguments */
diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
index a4263ba5..2cd41320 100644
--- a/lib/librte_eal/common/eal_common_lcore.c
+++ b/lib/librte_eal/common/eal_common_lcore.c
@@ -104,7 +104,7 @@ rte_eal_cpu_init(void)
RTE_LOG(DEBUG, EAL,
"Support maximum %u logical core(s) by configuration.\n",
RTE_MAX_LCORE);
- RTE_LOG(DEBUG, EAL, "Detected %u lcore(s)\n", config->lcore_count);
+ RTE_LOG(INFO, EAL, "Detected %u lcore(s)\n", config->lcore_count);
return 0;
}
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index 1ae8de70..7916c781 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -31,54 +31,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
-#include <sys/types.h>
#include <stdlib.h>
-#include <unistd.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_eal.h>
#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_debug.h>
-#include <rte_spinlock.h>
-#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
#include "eal_private.h"
-#define LOG_ELT_SIZE 2048
-
-#define LOG_HISTORY_MP_NAME "log_history"
-
-STAILQ_HEAD(log_history_list, log_history);
-
-/**
- * The structure of a message log in the log history.
- */
-struct log_history {
- STAILQ_ENTRY(log_history) next;
- unsigned size;
- char buf[0];
-};
-
-static struct rte_mempool *log_history_mp = NULL;
-static unsigned log_history_size = 0;
-static struct log_history_list log_history;
-
/* global log structure */
struct rte_logs rte_logs = {
.type = ~0,
@@ -86,10 +48,7 @@ struct rte_logs rte_logs = {
.file = NULL,
};
-static rte_spinlock_t log_dump_lock = RTE_SPINLOCK_INITIALIZER;
-static rte_spinlock_t log_list_lock = RTE_SPINLOCK_INITIALIZER;
static FILE *default_log_stream;
-static int history_enabled = 1;
/**
* This global structure stores some information about the message
@@ -98,66 +57,24 @@ static int history_enabled = 1;
struct log_cur_msg {
uint32_t loglevel; /**< log level - see rte_log.h */
uint32_t logtype; /**< log type - see rte_log.h */
-} __rte_cache_aligned;
-static struct log_cur_msg log_cur_msg[RTE_MAX_LCORE]; /**< per core log */
+};
+/* per core log */
+static RTE_DEFINE_PER_LCORE(struct log_cur_msg, log_cur_msg);
/* default logs */
int
-rte_log_add_in_history(const char *buf, size_t size)
+rte_log_add_in_history(const char *buf __rte_unused, size_t size __rte_unused)
{
- struct log_history *hist_buf = NULL;
- static const unsigned hist_buf_size = LOG_ELT_SIZE - sizeof(*hist_buf);
- void *obj;
-
- if (history_enabled == 0)
- return 0;
-
- rte_spinlock_lock(&log_list_lock);
-
- /* get a buffer for adding in history */
- if (log_history_size > RTE_LOG_HISTORY) {
- hist_buf = STAILQ_FIRST(&log_history);
- if (hist_buf) {
- STAILQ_REMOVE_HEAD(&log_history, next);
- log_history_size--;
- }
- }
- else {
- if (rte_mempool_mc_get(log_history_mp, &obj) < 0)
- obj = NULL;
- hist_buf = obj;
- }
-
- /* no buffer */
- if (hist_buf == NULL) {
- rte_spinlock_unlock(&log_list_lock);
- return -ENOBUFS;
- }
-
- /* not enough room for msg, buffer go back in mempool */
- if (size >= hist_buf_size) {
- rte_mempool_mp_put(log_history_mp, hist_buf);
- rte_spinlock_unlock(&log_list_lock);
- return -ENOBUFS;
- }
-
- /* add in history */
- memcpy(hist_buf->buf, buf, size);
- hist_buf->buf[size] = hist_buf->buf[hist_buf_size-1] = '\0';
- hist_buf->size = size;
- STAILQ_INSERT_TAIL(&log_history, hist_buf, next);
- log_history_size++;
- rte_spinlock_unlock(&log_list_lock);
-
return 0;
}
void
rte_log_set_history(int enable)
{
- history_enabled = enable;
+ if (enable)
+ RTE_LOG(WARNING, EAL, "The log history is deprecated.\n");
}
/* Change the stream that will be used by logging system */
@@ -205,63 +122,19 @@ rte_get_log_type(void)
/* get the current loglevel for the message being processed */
int rte_log_cur_msg_loglevel(void)
{
- unsigned lcore_id;
- lcore_id = rte_lcore_id();
- if (lcore_id >= RTE_MAX_LCORE)
- return rte_get_log_level();
- return log_cur_msg[lcore_id].loglevel;
+ return RTE_PER_LCORE(log_cur_msg).loglevel;
}
/* get the current logtype for the message being processed */
int rte_log_cur_msg_logtype(void)
{
- unsigned lcore_id;
- lcore_id = rte_lcore_id();
- if (lcore_id >= RTE_MAX_LCORE)
- return rte_get_log_type();
- return log_cur_msg[lcore_id].logtype;
+ return RTE_PER_LCORE(log_cur_msg).logtype;
}
/* Dump log history to file */
void
-rte_log_dump_history(FILE *out)
+rte_log_dump_history(FILE *out __rte_unused)
{
- struct log_history_list tmp_log_history;
- struct log_history *hist_buf;
- unsigned i;
-
- /* only one dump at a time */
- rte_spinlock_lock(&log_dump_lock);
-
- /* save list, and re-init to allow logging during dump */
- rte_spinlock_lock(&log_list_lock);
- tmp_log_history = log_history;
- STAILQ_INIT(&log_history);
- log_history_size = 0;
- rte_spinlock_unlock(&log_list_lock);
-
- for (i=0; i<RTE_LOG_HISTORY; i++) {
-
- /* remove one message from history list */
- hist_buf = STAILQ_FIRST(&tmp_log_history);
-
- if (hist_buf == NULL)
- break;
-
- STAILQ_REMOVE_HEAD(&tmp_log_history, next);
-
- /* write on stdout */
- if (fwrite(hist_buf->buf, hist_buf->size, 1, out) == 0) {
- rte_mempool_mp_put(log_history_mp, hist_buf);
- break;
- }
-
- /* put back message structure in pool */
- rte_mempool_mp_put(log_history_mp, hist_buf);
- }
- fflush(out);
-
- rte_spinlock_unlock(&log_dump_lock);
}
/*
@@ -273,17 +146,13 @@ rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
{
int ret;
FILE *f = rte_logs.file;
- unsigned lcore_id;
if ((level > rte_logs.level) || !(logtype & rte_logs.type))
return 0;
/* save loglevel and logtype in a global per-lcore variable */
- lcore_id = rte_lcore_id();
- if (lcore_id < RTE_MAX_LCORE) {
- log_cur_msg[lcore_id].loglevel = level;
- log_cur_msg[lcore_id].logtype = logtype;
- }
+ RTE_PER_LCORE(log_cur_msg).loglevel = level;
+ RTE_PER_LCORE(log_cur_msg).logtype = logtype;
ret = vfprintf(f, format, ap);
fflush(f);
@@ -308,30 +177,17 @@ rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
}
/*
- * called by environment-specific log init function to initialize log
- * history
+ * called by environment-specific log init function
*/
int
rte_eal_common_log_init(FILE *default_log)
{
- STAILQ_INIT(&log_history);
-
- /* reserve RTE_LOG_HISTORY*2 elements, so we can dump and
- * keep logging during this time */
- log_history_mp = rte_mempool_create(LOG_HISTORY_MP_NAME, RTE_LOG_HISTORY*2,
- LOG_ELT_SIZE, 0, 0,
- NULL, NULL,
- NULL, NULL,
- SOCKET_ID_ANY, 0);
-
- if ((log_history_mp == NULL) &&
- ((log_history_mp = rte_mempool_lookup(LOG_HISTORY_MP_NAME)) == NULL)){
- RTE_LOG(ERR, EAL, "%s(): cannot create log_history mempool\n",
- __func__);
- return -1;
- }
-
default_log_stream = default_log;
rte_openlog_stream(default_log);
+
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+ RTE_LOG(NOTICE, EAL, "Debug logs available - lower performance\n");
+#endif
+
return 0;
}
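
The per-lcore pattern that replaces the indexed array above is worth seeing in isolation. A minimal sketch of RTE_DEFINE_PER_LCORE/RTE_PER_LCORE; the counter is invented for the example:

    #include <stdint.h>
    #include <rte_per_lcore.h>

    /* one private instance per lcore; no locking or lcore_id indexing needed */
    static RTE_DEFINE_PER_LCORE(uint64_t, pkt_counter);

    static inline void
    count_pkt(void)
    {
        RTE_PER_LCORE(pkt_counter)++;  /* touches only this lcore's copy */
    }
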
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index 711c8457..5d28341f 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -119,6 +119,9 @@ find_heap_max_free_elem(int *s, unsigned align)
}
}
+ if (len < MALLOC_ELEM_OVERHEAD + align)
+ return 0;
+
return len - MALLOC_ELEM_OVERHEAD - align;
}
@@ -126,6 +129,7 @@ static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
int socket_id, unsigned flags, unsigned align, unsigned bound)
{
+ struct rte_memzone *mz;
struct rte_mem_config *mcfg;
size_t requested_len;
int socket, i;
@@ -148,6 +152,13 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
return NULL;
}
+ if (strlen(name) >= sizeof(mz->name) - 1) {
+ RTE_LOG(DEBUG, EAL, "%s(): memzone <%s>: name too long\n",
+ __func__, name);
+ rte_errno = EEXIST;
+ return NULL;
+ }
+
/* if alignment is not a power of two */
if (align && !rte_is_power_of_2(align)) {
RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
@@ -189,8 +200,13 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
if (len == 0) {
if (bound != 0)
requested_len = bound;
- else
+ else {
requested_len = find_heap_max_free_elem(&socket_id, align);
+ if (requested_len == 0) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ }
}
if (socket_id == SOCKET_ID_ANY)
@@ -223,7 +239,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
const struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
/* fill the zone in config */
- struct rte_memzone *mz = get_next_free_memzone();
+ mz = get_next_free_memzone();
if (mz == NULL) {
RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room "
@@ -321,15 +337,19 @@ rte_memzone_free(const struct rte_memzone *mz)
idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
idx = idx / sizeof(struct rte_memzone);
- addr = mcfg->memzone[idx].addr;
#ifdef RTE_LIBRTE_IVSHMEM
/*
* If ioremap_addr is set, it's an IVSHMEM memzone and we cannot
* free it.
*/
- if (mcfg->memzone[idx].ioremap_addr != 0)
- ret = -EINVAL;
+ if (mcfg->memzone[idx].ioremap_addr != 0) {
+ rte_rwlock_write_unlock(&mcfg->mlock);
+ return -EINVAL;
+ }
#endif
+
+ addr = mcfg->memzone[idx].addr;
+
if (addr == NULL)
ret = -EINVAL;
else if (mcfg->memzone_cnt == 0) {
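
For callers, the practical effect of the new checks is that rte_memzone_reserve() reports over-long names and exhausted heaps through rte_errno instead of misbehaving. A hedged usage sketch; the zone name and size are illustrative:

    #include <stdio.h>
    #include <rte_memzone.h>
    #include <rte_errno.h>

    static const struct rte_memzone *
    reserve_app_zone(void)
    {
        const struct rte_memzone *mz =
            rte_memzone_reserve("app_zone", 4096, SOCKET_ID_ANY, 0);

        if (mz == NULL)
            printf("reserve failed: %s\n", rte_strerror(rte_errno));
        return mz;
    }
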
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index 2b418d52..3efc90f0 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -139,7 +139,11 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
internal_cfg->syslog_facility = LOG_DAEMON;
/* default value from build option */
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+ internal_cfg->log_level = RTE_LOG_INFO;
+#else
internal_cfg->log_level = RTE_LOG_LEVEL;
+#endif
internal_cfg->xen_dom0_support = 0;
diff --git a/lib/librte_eal/common/eal_common_pci.c b/lib/librte_eal/common/eal_common_pci.c
index 40f49229..7248c38b 100644
--- a/lib/librte_eal/common/eal_common_pci.c
+++ b/lib/librte_eal/common/eal_common_pci.c
@@ -85,6 +85,19 @@
struct pci_driver_list pci_driver_list;
struct pci_device_list pci_device_list;
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+
+const char *pci_get_sysfs_path(void)
+{
+ const char *path = NULL;
+
+ path = getenv("SYSFS_PCI_DEVICES");
+ if (path == NULL)
+ return SYSFS_PCI_DEVICES;
+
+ return path;
+}
+
static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
{
struct rte_devargs *devargs;
@@ -162,23 +175,26 @@ rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *d
if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
id_table->subsystem_device_id != PCI_ANY_ID)
continue;
+ if (id_table->class_id != dev->id.class_id &&
+ id_table->class_id != RTE_CLASS_ANY_ID)
+ continue;
struct rte_pci_addr *loc = &dev->addr;
- RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ RTE_LOG(INFO, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
loc->domain, loc->bus, loc->devid, loc->function,
dev->numa_node);
- RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
- dev->id.device_id, dr->name);
-
/* no initialization when blacklisted, return without error */
if (dev->devargs != NULL &&
dev->devargs->type == RTE_DEVTYPE_BLACKLISTED_PCI) {
- RTE_LOG(DEBUG, EAL, " Device is blacklisted, not initializing\n");
+ RTE_LOG(INFO, EAL, " Device is blacklisted, not initializing\n");
return 1;
}
+ RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->name);
+
if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
/* map resources for devices that use igb_uio */
ret = rte_eal_pci_map_device(dev);
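
Because pci_get_sysfs_path() consults the environment before falling back to the built-in default, the PCI scan can be redirected at a fake sysfs tree without code changes, which is what the test_pci resources rely on. A small sketch; the path is illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_pci.h>

    static void
    use_fake_sysfs(void)
    {
        /* must be set before the PCI scan runs */
        setenv("SYSFS_PCI_DEVICES", "/tmp/fake-sysfs/bus/pci/devices", 1);
        printf("scanning under %s\n", pci_get_sysfs_path());
    }
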
diff --git a/lib/librte_eal/common/eal_common_pci_uio.c b/lib/librte_eal/common/eal_common_pci_uio.c
index f062e81d..367a6816 100644
--- a/lib/librte_eal/common/eal_common_pci_uio.c
+++ b/lib/librte_eal/common/eal_common_pci_uio.c
@@ -53,7 +53,7 @@ EAL_REGISTER_TAILQ(rte_uio_tailq)
static int
pci_uio_map_secondary(struct rte_pci_device *dev)
{
- int fd, i;
+ int fd, i, j;
struct mapped_pci_resource *uio_res;
struct mapped_pci_res_list *uio_res_list =
RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
@@ -85,6 +85,16 @@ pci_uio_map_secondary(struct rte_pci_device *dev)
"Cannot mmap device resource file %s to address: %p\n",
uio_res->maps[i].path,
uio_res->maps[i].addr);
+ if (mapaddr != MAP_FAILED) {
+ /* unmap addrs correctly mapped */
+ for (j = 0; j < i; j++)
+ pci_unmap_resource(
+ uio_res->maps[j].addr,
+ (size_t)uio_res->maps[j].size);
+ /* unmap addr wrongly mapped */
+ pci_unmap_resource(mapaddr,
+ (size_t)uio_res->maps[i].size);
+ }
return -1;
}
}
@@ -159,7 +169,8 @@ pci_uio_unmap(struct mapped_pci_resource *uio_res)
for (i = 0; i != uio_res->nb_maps; i++) {
pci_unmap_resource(uio_res->maps[i].addr,
(size_t)uio_res->maps[i].size);
- rte_free(uio_res->maps[i].path);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(uio_res->maps[i].path);
}
}
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 2342fa16..857dc3ea 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -49,9 +49,6 @@ int rte_eal_memzone_init(void);
/**
* Common log initialization function (private to eal).
*
- * Called by environment-specific log initialization function to initialize
- * log history.
- *
* @param default_log
* The default log stream to be used.
* @return
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
index 988125b3..da6c233a 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
@@ -323,12 +323,6 @@ rte_memcpy(void *dst, const void *src, size_t n)
return memcpy(dst, src, n);
}
-static inline void *
-rte_memcpy_func(void *dst, const void *src, size_t n)
-{
- return memcpy(dst, src, n);
-}
-
#endif /* RTE_ARCH_ARM_NEON_MEMCPY */
#ifdef __cplusplus
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
index 917cdc1b..5db66b63 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h
@@ -80,12 +80,6 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
#define rte_memcpy(d, s, n) memcpy((d), (s), (n))
-static inline void *
-rte_memcpy_func(void *dst, const void *src, size_t n)
-{
- return memcpy(dst, src, n);
-}
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/arch/tile/rte_memcpy.h b/lib/librte_eal/common/include/arch/tile/rte_memcpy.h
index 9b5b37ef..e606957c 100644
--- a/lib/librte_eal/common/include/arch/tile/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/tile/rte_memcpy.h
@@ -80,12 +80,6 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
#define rte_memcpy(d, s, n) memcpy((d), (s), (n))
-static inline void *
-rte_memcpy_func(void *dst, const void *src, size_t n)
-{
- return memcpy(dst, src, n);
-}
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
index f463ab30..413035e7 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -363,71 +363,26 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
}
/**
- * Copy 256 bytes from one location to another,
- * locations should not overlap.
- */
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
- rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
- rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
- rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
- rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
- rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
- rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
- rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
- rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
-}
-
-/**
- * Copy 64-byte blocks from one location to another,
- * locations should not overlap.
- */
-static inline void
-rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
-{
- __m256i ymm0, ymm1;
-
- while (n >= 64) {
- ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
- n -= 64;
- ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
- src = (const uint8_t *)src + 64;
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
- dst = (uint8_t *)dst + 64;
- }
-}
-
-/**
- * Copy 256-byte blocks from one location to another,
+ * Copy 128-byte blocks from one location to another,
* locations should not overlap.
*/
static inline void
-rte_mov256blocks(uint8_t *dst, const uint8_t *src, size_t n)
+rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
{
- __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
+ __m256i ymm0, ymm1, ymm2, ymm3;
- while (n >= 256) {
+ while (n >= 128) {
ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
- n -= 256;
+ n -= 128;
ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
- ymm4 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 4 * 32));
- ymm5 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 5 * 32));
- ymm6 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 6 * 32));
- ymm7 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 7 * 32));
- src = (const uint8_t *)src + 256;
+ src = (const uint8_t *)src + 128;
_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 4 * 32), ymm4);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 5 * 32), ymm5);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 6 * 32), ymm6);
- _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 7 * 32), ymm7);
- dst = (uint8_t *)dst + 256;
+ dst = (uint8_t *)dst + 128;
}
}
@@ -466,51 +421,56 @@ rte_memcpy(void *dst, const void *src, size_t n)
}
/**
- * Fast way when copy size doesn't exceed 512 bytes
+ * Fast way when copy size doesn't exceed 256 bytes
*/
if (n <= 32) {
rte_mov16((uint8_t *)dst, (const uint8_t *)src);
- rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+ rte_mov16((uint8_t *)dst - 16 + n,
+ (const uint8_t *)src - 16 + n);
+ return ret;
+ }
+ if (n <= 48) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
+ rte_mov16((uint8_t *)dst - 16 + n,
+ (const uint8_t *)src - 16 + n);
return ret;
}
if (n <= 64) {
rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ rte_mov32((uint8_t *)dst - 32 + n,
+ (const uint8_t *)src - 32 + n);
return ret;
}
- if (n <= 512) {
- if (n >= 256) {
- n -= 256;
- rte_mov256((uint8_t *)dst, (const uint8_t *)src);
- src = (const uint8_t *)src + 256;
- dst = (uint8_t *)dst + 256;
- }
+ if (n <= 256) {
if (n >= 128) {
n -= 128;
rte_mov128((uint8_t *)dst, (const uint8_t *)src);
src = (const uint8_t *)src + 128;
dst = (uint8_t *)dst + 128;
}
+COPY_BLOCK_128_BACK31:
if (n >= 64) {
n -= 64;
rte_mov64((uint8_t *)dst, (const uint8_t *)src);
src = (const uint8_t *)src + 64;
dst = (uint8_t *)dst + 64;
}
-COPY_BLOCK_64_BACK31:
if (n > 32) {
rte_mov32((uint8_t *)dst, (const uint8_t *)src);
- rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ rte_mov32((uint8_t *)dst - 32 + n,
+ (const uint8_t *)src - 32 + n);
return ret;
}
if (n > 0) {
- rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+ rte_mov32((uint8_t *)dst - 32 + n,
+ (const uint8_t *)src - 32 + n);
}
return ret;
}
/**
- * Make store aligned when copy size exceeds 512 bytes
+ * Make store aligned when copy size exceeds 256 bytes
*/
dstofss = (uintptr_t)dst & 0x1F;
if (dstofss > 0) {
@@ -522,35 +482,19 @@ COPY_BLOCK_64_BACK31:
}
/**
- * Copy 256-byte blocks.
- * Use copy block function for better instruction order control,
- * which is important when load is unaligned.
+ * Copy 128-byte blocks
*/
- rte_mov256blocks((uint8_t *)dst, (const uint8_t *)src, n);
+ rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
bits = n;
- n = n & 255;
+ n = n & 127;
bits -= n;
src = (const uint8_t *)src + bits;
dst = (uint8_t *)dst + bits;
/**
- * Copy 64-byte blocks.
- * Use copy block function for better instruction order control,
- * which is important when load is unaligned.
- */
- if (n >= 64) {
- rte_mov64blocks((uint8_t *)dst, (const uint8_t *)src, n);
- bits = n;
- n = n & 63;
- bits -= n;
- src = (const uint8_t *)src + bits;
- dst = (uint8_t *)dst + bits;
- }
-
- /**
* Copy whatever left
*/
- goto COPY_BLOCK_64_BACK31;
+ goto COPY_BLOCK_128_BACK31;
}
#else /* RTE_MACHINE_CPUFLAG */
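
The small-size paths above all lean on one trick: a few fixed-width moves whose last move starts at n minus the move width, so its tail overlaps what the earlier moves already wrote. A scalar sketch of the same idea using plain memcpy, not the patch's code:

    #include <string.h>
    #include <stdint.h>

    /* copy 33..48 bytes with three fixed 16-byte moves; the last move
     * begins at n - 16, harmlessly overlapping the middle one */
    static inline void
    copy_33_to_48(uint8_t *dst, const uint8_t *src, size_t n)
    {
        memcpy(dst, src, 16);
        memcpy(dst + 16, src + 16, 16);
        memcpy(dst + n - 16, src + n - 16, 16);
    }
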
diff --git a/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
index d9356419..0649f794 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_rtm.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
@@ -50,11 +50,10 @@ void rte_xend(void)
asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
}
-static __attribute__((__always_inline__)) inline
-void rte_xabort(const unsigned int status)
-{
- asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory");
-}
+/* not an inline function to workaround a clang bug with -O0 */
+#define rte_xabort(status) do { \
+ asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); \
+} while (0)
static __attribute__((__always_inline__)) inline
int rte_xtest(void)
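
A hedged sketch of how these RTM helpers combine into the usual lock-elision pattern; RTE_XBEGIN_STARTED is the success status defined in this header, while the lock word and the fallback path are placeholders:

    static volatile int lock_taken;  /* placeholder for a real lock */

    static void
    critical_section(void)
    {
        unsigned int status = rte_xbegin();

        if (status == RTE_XBEGIN_STARTED) {
            if (lock_taken)
                rte_xabort(0xff);  /* someone holds the lock: abort */
            /* ... transactional work ... */
            rte_xend();
        } else {
            /* fallback: take the real lock and retry non-transactionally */
        }
    }
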
diff --git a/lib/librte_eal/common/include/generic/rte_memcpy.h b/lib/librte_eal/common/include/generic/rte_memcpy.h
index 03e84773..afb0afe4 100644
--- a/lib/librte_eal/common/include/generic/rte_memcpy.h
+++ b/lib/librte_eal/common/include/generic/rte_memcpy.h
@@ -134,11 +134,4 @@ rte_memcpy(void *dst, const void *src, size_t n);
#endif /* __DOXYGEN__ */
-/*
- * memcpy() function used by rte_memcpy macro
- */
-static inline void *
-rte_memcpy_func(void *dst, const void *src, size_t n) __attribute__((always_inline));
-
-
#endif /* _RTE_MEMCPY_H_ */
diff --git a/lib/librte_eal/common/include/rte_debug.h b/lib/librte_eal/common/include/rte_debug.h
index 94129fab..cab6fb4c 100644
--- a/lib/librte_eal/common/include/rte_debug.h
+++ b/lib/librte_eal/common/include/rte_debug.h
@@ -43,6 +43,9 @@
* the implementation is architecture-specific.
*/
+#include "rte_log.h"
+#include "rte_branch_prediction.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -76,8 +79,13 @@ void rte_dump_registers(void);
#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+#define RTE_ASSERT(exp) RTE_VERIFY(exp)
+#else
+#define RTE_ASSERT(exp) do {} while (0)
+#endif
#define RTE_VERIFY(exp) do { \
- if (!(exp)) \
+ if (unlikely(!(exp))) \
rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
} while (0)
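
The split mirrors assert() versus an always-on check: RTE_ASSERT compiles away unless the build log level reaches DEBUG, while RTE_VERIFY panics on failure in every build. A small usage sketch:

    #include <rte_debug.h>

    static int
    checked_div(int a, int b)
    {
        RTE_ASSERT(a >= 0);   /* debug builds only */
        RTE_VERIFY(b != 0);   /* always checked; panics on failure */
        return a / b;
    }
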
diff --git a/lib/librte_eal/common/include/rte_keepalive.h b/lib/librte_eal/common/include/rte_keepalive.h
index 10dac2e0..88ad8e48 100644
--- a/lib/librte_eal/common/include/rte_keepalive.h
+++ b/lib/librte_eal/common/include/rte_keepalive.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright 2015 Intel Shannon Ltd. All rights reserved.
+ * Copyright 2015-2016 Intel Shannon Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -48,17 +48,47 @@
#define RTE_KEEPALIVE_MAXCORES RTE_MAX_LCORE
#endif
+enum rte_keepalive_state {
+ RTE_KA_STATE_UNUSED = 0,
+ RTE_KA_STATE_ALIVE = 1,
+ RTE_KA_STATE_MISSING = 4,
+ RTE_KA_STATE_DEAD = 2,
+ RTE_KA_STATE_GONE = 3,
+ RTE_KA_STATE_DOZING = 5,
+ RTE_KA_STATE_SLEEP = 6
+};
+
/**
* Keepalive failure callback.
*
* Receives a data pointer passed to rte_keepalive_create() and the id of the
* failed core.
+ * @param data Data pointer passed to rte_keepalive_create()
+ * @param id_core ID of the core that has failed
*/
typedef void (*rte_keepalive_failure_callback_t)(
void *data,
const int id_core);
/**
+ * Keepalive relay callback.
+ *
+ * Receives a data pointer passed to rte_keepalive_register_relay_callback(),
+ * the id of the core for which state is to be forwarded, and details of the
+ * current core state.
+ * @param data Data pointer passed to rte_keepalive_register_relay_callback()
+ * @param id_core ID of the core for which state is being reported
+ * @param core_state The current state of the core
+ * @param last_seen Timestamp of when core was last seen alive
+ */
+typedef void (*rte_keepalive_relay_callback_t)(
+ void *data,
+ const int id_core,
+ enum rte_keepalive_state core_state,
+ uint64_t last_seen
+ );
+
+/**
* Keepalive state structure.
* @internal
*/
@@ -105,4 +135,35 @@ void rte_keepalive_register_core(struct rte_keepalive *keepcfg,
void
rte_keepalive_mark_alive(struct rte_keepalive *keepcfg);
+/**
+ * Per-core sleep-time indication.
+ * @param *keepcfg
+ * Keepalive structure pointer
+ *
+ * If CPU idling is enabled, this function needs to be called from within
+ * the main process loop of the LCore going to sleep, in order to avoid
+ * the LCore being mis-detected as dead.
+ */
+void
+rte_keepalive_mark_sleep(struct rte_keepalive *keepcfg);
+
+/**
+ * Registers a 'live core' callback.
+ *
+ * The complement of the 'dead core' callback. This is called when a
+ * core is known to be alive, and is intended for cases when an app
+ * needs to know 'liveness' beyond just knowing when a core has died.
+ *
+ * @param *keepcfg
+ * Keepalive structure pointer
+ * @param callback
+ * Function called to relay core state information.
+ * @param data
+ * Data pointer to be passed to function callback.
+ */
+void
+rte_keepalive_register_relay_callback(struct rte_keepalive *keepcfg,
+ rte_keepalive_relay_callback_t callback,
+ void *data);
+
#endif /* _KEEPALIVE_H_ */
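
A hedged sketch of wiring up the new relay callback next to an existing keepalive instance; the logging body is illustrative:

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_common.h>
    #include <rte_keepalive.h>

    static void
    relay_state(void *data __rte_unused, const int id_core,
                enum rte_keepalive_state core_state, uint64_t last_seen)
    {
        printf("core %d: state %d, last seen @%" PRIu64 "\n",
               id_core, (int)core_state, last_seen);
    }

    /* after rte_keepalive_create(callback, data) returns keepcfg: */
    /* rte_keepalive_register_relay_callback(keepcfg, relay_state, NULL); */
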
diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h
index ac151302..fe7b5865 100644
--- a/lib/librte_eal/common/include/rte_lcore.h
+++ b/lib/librte_eal/common/include/rte_lcore.h
@@ -250,23 +250,16 @@ void rte_thread_get_affinity(rte_cpuset_t *cpusetp);
/**
* Set thread names.
*
- * Macro to wrap `pthread_setname_np()` with a glibc version check.
- * Only glibc >= 2.12 supports this feature.
+ * @note It fails with glibc < 2.12.
*
- * This macro only used for Linux, BSD does direct libc call.
- * BSD libc version of function is `pthread_set_name_np()`.
+ * @param id
+ * Thread id.
+ * @param name
+ * Thread name to set.
+ * @return
+ * On success, return 0; otherwise return a negative value.
*/
-#if defined(__DOXYGEN__)
-#define rte_thread_setname(...) pthread_setname_np(__VA_ARGS__)
-#endif
-
-#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
-#if __GLIBC_PREREQ(2, 12)
-#define rte_thread_setname(...) pthread_setname_np(__VA_ARGS__)
-#else
-#define rte_thread_setname(...) 0
-#endif
-#endif
+int rte_thread_setname(pthread_t id, const char *name);
#ifdef __cplusplus
}
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 2e47e7f6..b1add04c 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -42,6 +42,8 @@
* This file provides a log API to RTE applications.
*/
+#include "rte_common.h" /* for __rte_deprecated macro */
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -179,22 +181,27 @@ int rte_log_cur_msg_loglevel(void);
int rte_log_cur_msg_logtype(void);
/**
+ * @deprecated
* Enable or disable the history (enabled by default)
*
* @param enable
* true to enable, or 0 to disable history.
*/
+__rte_deprecated
void rte_log_set_history(int enable);
/**
+ * @deprecated
* Dump the log history to a file
*
* @param f
* A pointer to a file for output
*/
+__rte_deprecated
void rte_log_dump_history(FILE *f);
/**
+ * @deprecated
* Add a log message to the history.
*
* This function can be called from a user-defined log stream. It adds
@@ -209,6 +216,7 @@ void rte_log_dump_history(FILE *f);
* - 0: Success.
* - (-ENOBUFS) if there is no room to store the message.
*/
+__rte_deprecated
int rte_log_add_in_history(const char *buf, size_t size);
/**
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index f8dbece0..06611093 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -200,21 +200,22 @@ unsigned rte_memory_get_nrank(void);
int rte_xen_dom0_supported(void);
/**< Internal use only - phys to virt mapping for xen */
-phys_addr_t rte_xen_mem_phy2mch(uint32_t, const phys_addr_t);
+phys_addr_t rte_xen_mem_phy2mch(int32_t, const phys_addr_t);
/**
* Return the physical address of elt, which is an element of the pool mp.
*
* @param memseg_id
- * The mempool is from which memory segment.
+ * Identifier of the memory segment owning the physical address. If
+ * set to -1, find it automatically.
* @param phy_addr
* physical address of elt.
*
* @return
- * The physical address or error.
+ * The physical address or RTE_BAD_PHYS_ADDR on error.
*/
static inline phys_addr_t
-rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+rte_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr)
{
if (rte_xen_dom0_supported())
return rte_xen_mem_phy2mch(memseg_id, phy_addr);
@@ -250,7 +251,7 @@ static inline int rte_xen_dom0_supported(void)
}
static inline phys_addr_t
-rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+rte_mem_phy2mch(int32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
{
return phy_addr;
}
diff --git a/lib/librte_eal/common/include/rte_pci.h b/lib/librte_eal/common/include/rte_pci.h
index e692094e..fa749626 100644
--- a/lib/librte_eal/common/include/rte_pci.h
+++ b/lib/librte_eal/common/include/rte_pci.h
@@ -91,7 +91,7 @@ extern struct pci_driver_list pci_driver_list; /**< Global list of PCI drivers.
extern struct pci_device_list pci_device_list; /**< Global list of PCI devices. */
/** Pathname of PCI devices directory. */
-#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+const char *pci_get_sysfs_path(void);
/** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */
#define PCI_PRI_FMT "%.4" PRIx16 ":%.2" PRIx8 ":%.2" PRIx8 ".%" PRIx8
@@ -105,9 +105,6 @@ extern struct pci_device_list pci_device_list; /**< Global list of PCI devices.
/** Nb. of values in PCI resource format. */
#define PCI_RESOURCE_FMT_NVAL 3
-/** IO resource type: memory address space */
-#define IORESOURCE_MEM 0x00000200
-
/**
* A structure describing a PCI resource.
*/
@@ -125,6 +122,7 @@ struct rte_pci_resource {
* table of these IDs for each device that it supports.
*/
struct rte_pci_id {
+ uint32_t class_id; /**< Class ID (class, subclass, pi) or RTE_CLASS_ANY_ID. */
uint16_t vendor_id; /**< Vendor ID or PCI_ANY_ID. */
uint16_t device_id; /**< Device ID or PCI_ANY_ID. */
uint16_t subsystem_vendor_id; /**< Subsystem vendor ID or PCI_ANY_ID. */
@@ -170,10 +168,12 @@ struct rte_pci_device {
/** Any PCI device identifier (vendor, device, ...) */
#define PCI_ANY_ID (0xffff)
+#define RTE_CLASS_ANY_ID (0xffffff)
#ifdef __cplusplus
/** C++ macro used to help building up tables of device IDs */
#define RTE_PCI_DEVICE(vend, dev) \
+ RTE_CLASS_ANY_ID, \
(vend), \
(dev), \
PCI_ANY_ID, \
@@ -181,6 +181,7 @@ struct rte_pci_device {
#else
/** Macro used to help building up tables of device IDs */
#define RTE_PCI_DEVICE(vend, dev) \
+ .class_id = RTE_CLASS_ANY_ID, \
.vendor_id = (vend), \
.device_id = (dev), \
.subsystem_vendor_id = PCI_ANY_ID, \
@@ -213,8 +214,6 @@ struct rte_pci_driver {
/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
#define RTE_PCI_DRV_NEED_MAPPING 0x0001
-/** Device driver must be registered several times until failure - deprecated */
-#pragma GCC poison RTE_PCI_DRV_MULTIPLE
/** Device needs to be unbound even if no module is provided */
#define RTE_PCI_DRV_FORCE_UNBIND 0x0004
/** Device driver supports link state interrupt */
@@ -520,15 +519,17 @@ int rte_eal_pci_write_config(const struct rte_pci_device *device,
struct rte_pci_ioport {
struct rte_pci_device *dev;
uint64_t base;
+ uint64_t len; /* only filled for memory mapped ports */
};
/**
- * Initialises a rte_pci_ioport object for a pci device io resource.
+ * Initialize a rte_pci_ioport object for a pci device io resource.
+ *
* This object is then used to gain access to those io resources (see below).
*
* @param dev
- * A pointer to a rte_pci_device structure describing the device.
- * to use
+ * A pointer to a rte_pci_device structure describing the device
+ * to use.
* @param bar
* Index of the io pci resource we want to access.
* @param p
@@ -544,6 +545,8 @@ int rte_eal_pci_ioport_map(struct rte_pci_device *dev, int bar,
*
* @param p
* The rte_pci_ioport object to be uninitialized.
+ * @return
+ * 0 on success, negative on error.
*/
int rte_eal_pci_ioport_unmap(struct rte_pci_ioport *p);
@@ -577,20 +580,6 @@ void rte_eal_pci_ioport_read(struct rte_pci_ioport *p,
void rte_eal_pci_ioport_write(struct rte_pci_ioport *p,
const void *data, size_t len, off_t offset);
-#ifdef RTE_PCI_CONFIG
-#include <rte_common.h>
-/**
- * Set special config space registers for performance purpose.
- * It is deprecated, as all configurations have been moved into
- * each PMDs respectively.
- *
- * @param dev
- * A pointer to a rte_pci_device structure describing the device
- * to use
- */
-void pci_config_space_set(struct rte_pci_device *dev) __rte_deprecated;
-#endif /* RTE_PCI_CONFIG */
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/rte_pci_dev_ids.h b/lib/librte_eal/common/include/rte_pci_dev_ids.h
index cf7b5487..af39fbbd 100644
--- a/lib/librte_eal/common/include/rte_pci_dev_ids.h
+++ b/lib/librte_eal/common/include/rte_pci_dev_ids.h
@@ -63,11 +63,12 @@
* This file contains a list of the PCI device IDs recognised by DPDK, which
* can be used to fill out an array of structures describing the devices.
*
- * Currently four families of devices are recognised: those supported by the
- * IGB driver, by EM driver, those supported by the IXGBE driver, and by virtio
- * driver which is a para virtualization driver running in guest virtual machine.
- * The inclusion of these in an array built using this file depends on the
- * definition of
+ * Currently five families of devices are recognised: those supported by the
+ * IGB driver, the EM driver, the IXGBE driver, the BNXT driver, and the
+ * virtio driver, which is a para-virtualization driver running in a guest
+ * virtual machine. The inclusion of these in an array built using this file
+ * depends on the definition of
+ * RTE_PCI_DEV_ID_DECL_BNXT
* RTE_PCI_DEV_ID_DECL_EM
* RTE_PCI_DEV_ID_DECL_IGB
* RTE_PCI_DEV_ID_DECL_IGBVF
@@ -152,6 +153,10 @@
#define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev)
#endif
+#ifndef RTE_PCI_DEV_ID_DECL_BNXT
+#define RTE_PCI_DEV_ID_DECL_BNXT(vend, dev)
+#endif
+
#ifndef PCI_VENDOR_ID_INTEL
/** Vendor ID used by Intel devices */
#define PCI_VENDOR_ID_INTEL 0x8086
@@ -446,12 +451,14 @@ RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SFP)
#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
-#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15C6
-#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8
#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA
#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC
#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
+#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
+#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
@@ -506,12 +513,14 @@ RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550T1)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_KR)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_KR_L)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N)
-RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_1G_T)
-RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_SGMII)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_10G_T)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_QSFP)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_QSFP_N)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_SFP)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_1G_T)
+RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KX4)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KR)
@@ -532,12 +541,16 @@ RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BYPASS)
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_X722_A0 0x374C
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_XL710)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QEMU)
@@ -550,12 +563,16 @@ RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_20G_KR2)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_20G_KR2_A)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T4)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_25G_B)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_25G_SFP28)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_X722_A0)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_KX_X722)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_X722)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_X722)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_1G_BASE_T_X722)
RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T_X722)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_I_X722)
+RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_I_X722)
/*************** Physical FM10K devices from fm10k_type.h ***************/
@@ -686,6 +703,30 @@ RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57811_MF)
RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_MF)
#endif
+/****************** Broadcom bnxt devices ******************/
+
+#define BROADCOM_DEV_ID_57301 0x16c8
+#define BROADCOM_DEV_ID_57302 0x16c9
+#define BROADCOM_DEV_ID_57304_PF 0x16ca
+#define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_57402 0x16d0
+#define BROADCOM_DEV_ID_57404 0x16d1
+#define BROADCOM_DEV_ID_57406_PF 0x16d2
+#define BROADCOM_DEV_ID_57406_VF 0x16d3
+#define BROADCOM_DEV_ID_57406_MF 0x16d4
+#define BROADCOM_DEV_ID_57314 0x16df
+
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF)
+RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314)
+
/*
* Undef all RTE_PCI_DEV_ID_DECL_* here.
*/
@@ -702,3 +743,4 @@ RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_MF)
#undef RTE_PCI_DEV_ID_DECL_VMXNET3
#undef RTE_PCI_DEV_ID_DECL_FM10K
#undef RTE_PCI_DEV_ID_DECL_FM10KVF
+#undef RTE_PCI_DEV_ID_DECL_BNXT
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index f8ea6d73..dbe09975 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -60,7 +60,7 @@ extern "C" {
/**
* Minor version/month number i.e. the mm in yy.mm.z
*/
-#define RTE_VER_MONTH 4
+#define RTE_VER_MONTH 7
/**
* Patch level number i.e. the z in yy.mm.z
@@ -70,14 +70,14 @@ extern "C" {
/**
* Extra string to be appended to version number
*/
-#define RTE_VER_SUFFIX ""
+#define RTE_VER_SUFFIX "-rc"
/**
* Patch release number
* 0-15 = release candidates
* 16 = release
*/
-#define RTE_VER_RELEASE 16
+#define RTE_VER_RELEASE 1
/**
* Macro to compute a version number usable for comparisons
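
With these values the tree identifies itself as 16.07.0-rc1. The comparison
macro referenced here packs the four components into a single integer (in
this release, RTE_VERSION_NUM(a, b, c, e) is (a) << 24 | (b) << 16 |
(c) << 8 | (e)), so applications can gate code on a release at compile
time. A small sketch, assuming that definition:

#include <rte_version.h>

#if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 0)
/* It is safe to rely on APIs introduced in the 16.07 cycle here. */
#endif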
diff --git a/lib/librte_eal/common/rte_keepalive.c b/lib/librte_eal/common/rte_keepalive.c
index 23363ec1..9765d1bd 100644
--- a/lib/librte_eal/common/rte_keepalive.c
+++ b/lib/librte_eal/common/rte_keepalive.c
@@ -42,12 +42,8 @@
struct rte_keepalive {
/** Core Liveness. */
- enum rte_keepalive_state {
- ALIVE = 1,
- MISSING = 0,
- DEAD = 2,
- GONE = 3
- } __rte_cache_aligned state_flags[RTE_KEEPALIVE_MAXCORES];
+ enum rte_keepalive_state __rte_cache_aligned state_flags[
+ RTE_KEEPALIVE_MAXCORES];
/** Last-seen-alive timestamps */
uint64_t last_alive[RTE_KEEPALIVE_MAXCORES];
@@ -68,6 +64,15 @@ struct rte_keepalive {
void *callback_data;
uint64_t tsc_initial;
uint64_t tsc_mhz;
+
+ /** Core state relay handler. */
+ rte_keepalive_relay_callback_t relay_callback;
+
+ /**
+ * Core state relay handler app data.
+ * Pointer is passed to the relay handler.
+ */
+ void *relay_callback_data;
};
static void
@@ -92,16 +97,18 @@ rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer,
continue;
switch (keepcfg->state_flags[idx_core]) {
- case ALIVE: /* Alive */
- keepcfg->state_flags[idx_core] = MISSING;
+ case RTE_KA_STATE_UNUSED:
+ break;
+ case RTE_KA_STATE_ALIVE: /* Alive */
+ keepcfg->state_flags[idx_core] = RTE_KA_STATE_MISSING;
keepcfg->last_alive[idx_core] = rte_rdtsc();
break;
- case MISSING: /* MIA */
+ case RTE_KA_STATE_MISSING: /* MIA */
print_trace("Core MIA. ", keepcfg, idx_core);
- keepcfg->state_flags[idx_core] = DEAD;
+ keepcfg->state_flags[idx_core] = RTE_KA_STATE_DEAD;
break;
- case DEAD: /* Dead */
- keepcfg->state_flags[idx_core] = GONE;
+ case RTE_KA_STATE_DEAD: /* Dead */
+ keepcfg->state_flags[idx_core] = RTE_KA_STATE_GONE;
print_trace("Core died. ", keepcfg, idx_core);
if (keepcfg->callback)
keepcfg->callback(
@@ -109,9 +116,22 @@ rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer,
idx_core
);
break;
- case GONE: /* Buried */
+ case RTE_KA_STATE_GONE: /* Buried */
+ break;
+ case RTE_KA_STATE_DOZING: /* Core going idle */
+ keepcfg->state_flags[idx_core] = RTE_KA_STATE_SLEEP;
+ keepcfg->last_alive[idx_core] = rte_rdtsc();
+ break;
+ case RTE_KA_STATE_SLEEP: /* Idled core */
break;
}
+ if (keepcfg->relay_callback)
+ keepcfg->relay_callback(
+ keepcfg->relay_callback_data,
+ idx_core,
+ keepcfg->state_flags[idx_core],
+ keepcfg->last_alive[idx_core]
+ );
}
}
@@ -133,11 +153,19 @@ rte_keepalive_create(rte_keepalive_failure_callback_t callback,
return keepcfg;
}
+void rte_keepalive_register_relay_callback(struct rte_keepalive *keepcfg,
+ rte_keepalive_relay_callback_t callback,
+ void *data)
+{
+ keepcfg->relay_callback = callback;
+ keepcfg->relay_callback_data = data;
+}
+
void
rte_keepalive_register_core(struct rte_keepalive *keepcfg, const int id_core)
{
if (id_core < RTE_KEEPALIVE_MAXCORES) {
- keepcfg->active_cores[id_core] = 1;
+ keepcfg->active_cores[id_core] = RTE_KA_STATE_ALIVE;
keepcfg->last_alive[id_core] = rte_rdtsc();
}
}
@@ -145,5 +173,11 @@ rte_keepalive_register_core(struct rte_keepalive *keepcfg, const int id_core)
void
rte_keepalive_mark_alive(struct rte_keepalive *keepcfg)
{
- keepcfg->state_flags[rte_lcore_id()] = ALIVE;
+ keepcfg->state_flags[rte_lcore_id()] = RTE_KA_STATE_ALIVE;
+}
+
+void
+rte_keepalive_mark_sleep(struct rte_keepalive *keepcfg)
+{
+ keepcfg->state_flags[rte_lcore_id()] = RTE_KA_STATE_DOZING;
}
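
A usage sketch for the new relay hook, assuming the callback typedef takes
the same arguments as the call site in rte_keepalive_dispatch_pings() above
(app data pointer, core id, core state, last-alive timestamp):

#include <stdio.h>
#include <inttypes.h>
#include <rte_keepalive.h>

static void
relay_core_state(void *data, const int id_core,
		 enum rte_keepalive_state core_state, uint64_t last_alive)
{
	(void)data;
	printf("core %d: state=%d last_alive=%" PRIu64 "\n",
	       id_core, (int)core_state, last_alive);
}

static void
install_relay(struct rte_keepalive *keepcfg)
{
	/* Called once, after rte_keepalive_create(). */
	rte_keepalive_register_relay_callback(keepcfg, relay_core_state,
					      NULL);
}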
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index e1093619..30b30f33 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -44,9 +44,12 @@ VPATH += $(RTE_SDK)/lib/librte_eal/common
CFLAGS += -I$(SRCDIR)/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+ifeq ($(CONFIG_RTE_LIBRTE_IVSHMEM),y)
+# workaround for circular dependency eal -> ivshmem -> ring/mempool -> eal
CFLAGS += -I$(RTE_SDK)/lib/librte_ring
CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
CFLAGS += -I$(RTE_SDK)/lib/librte_ivshmem
+endif
CFLAGS += $(WERROR_FLAGS) -O3
LDLIBS += -ldl
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 8aafd519..543ef869 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -465,24 +465,6 @@ eal_parse_vfio_intr(const char *mode)
return -1;
}
-static inline size_t
-eal_get_hugepage_mem_size(void)
-{
- uint64_t size = 0;
- unsigned i, j;
-
- for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
- struct hugepage_info *hpi = &internal_config.hugepage_info[i];
- if (hpi->hugedir != NULL) {
- for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
- size += hpi->hugepage_sz * hpi->num_pages[j];
- }
- }
- }
-
- return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
-}
-
/* Parse the arguments for --log-level only */
static void
eal_log_level_parse(int argc, char **argv)
@@ -715,12 +697,8 @@ rte_eal_iopl_init(void)
#if defined(RTE_ARCH_X86)
if (iopl(3) != 0)
return -1;
- return 0;
-#elif defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
- return 0; /* iopl syscall not supported for ARM/ARM64 */
-#else
- return -1;
#endif
+ return 0;
}
/* Launch threads, called at application init(). */
@@ -766,8 +744,6 @@ rte_eal_init(int argc, char **argv)
if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
if (internal_config.no_hugetlbfs)
internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
- else
- internal_config.memory = eal_get_hugepage_mem_size();
}
if (internal_config.vmware_tsc_map == 1) {
@@ -863,7 +839,7 @@ rte_eal_init(int argc, char **argv)
ret = rte_thread_setname(lcore_config[i].thread_id,
thread_name);
if (ret != 0)
- RTE_LOG(ERR, EAL,
+ RTE_LOG(DEBUG, EAL,
"Cannot set name for lcore thread\n");
}
diff --git a/lib/librte_eal/linuxapp/eal/eal_debug.c b/lib/librte_eal/linuxapp/eal/eal_debug.c
index 907fbfa7..5fbc17c5 100644
--- a/lib/librte_eal/linuxapp/eal/eal_debug.c
+++ b/lib/librte_eal/linuxapp/eal/eal_debug.c
@@ -77,9 +77,6 @@ void __rte_panic(const char *funcname, const char *format, ...)
{
va_list ap;
- /* disable history */
- rte_log_set_history(0);
-
rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
va_start(ap, format);
rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
@@ -98,9 +95,6 @@ rte_exit(int exit_code, const char *format, ...)
{
va_list ap;
- /* disable history */
- rte_log_set_history(0);
-
if (exit_code != 0)
RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\n"
" Cause: ", exit_code);
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 06b26a9e..47a3b20a 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -57,10 +57,8 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_debug.h>
#include <rte_log.h>
-#include <rte_mempool.h>
#include <rte_pci.h>
#include <rte_malloc.h>
#include <rte_errno.h>
@@ -889,7 +887,7 @@ rte_eal_intr_init(void)
"eal-intr-thread");
ret_1 = rte_thread_setname(intr_thread, thread_name);
if (ret_1 != 0)
- RTE_LOG(ERR, EAL,
+ RTE_LOG(DEBUG, EAL,
"Failed to set thread name for interrupt handling\n");
}
diff --git a/lib/librte_eal/linuxapp/eal/eal_ivshmem.c b/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
index 07aec694..67b3caf2 100644
--- a/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
+++ b/lib/librte_eal/linuxapp/eal/eal_ivshmem.c
@@ -49,7 +49,6 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ring.h>
-#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_ivshmem.h>
@@ -184,21 +183,21 @@ overlap(const struct rte_memzone * mz1, const struct rte_memzone * mz2)
i_end2 = mz2->ioremap_addr + mz2->len;
/* check for overlap in virtual addresses */
- if (start1 > start2 && start1 < end2)
+ if (start1 >= start2 && start1 < end2)
result |= VIRT;
if (start2 >= start1 && start2 < end1)
result |= VIRT;
/* check for overlap in physical addresses */
- if (p_start1 > p_start2 && p_start1 < p_end2)
+ if (p_start1 >= p_start2 && p_start1 < p_end2)
result |= PHYS;
- if (p_start2 > p_start1 && p_start2 < p_end1)
+ if (p_start2 >= p_start1 && p_start2 < p_end1)
result |= PHYS;
/* check for overlap in ioremap addresses */
- if (i_start1 > i_start2 && i_start1 < i_end2)
+ if (i_start1 >= i_start2 && i_start1 < i_end2)
result |= IOREMAP;
- if (i_start2 > i_start1 && i_start2 < i_end1)
+ if (i_start2 >= i_start1 && i_start2 < i_end1)
result |= IOREMAP;
return result;
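
The comparison fix above matters when two zones share a start address: with
'>' the equal-start case was reported as non-overlapping. Treating each
range as half-open [start, end), the corrected test reduces to the
following standalone sketch:

#include <stdint.h>

static int
ranges_overlap(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
{
	/* Overlap iff either range starts inside the other; '>=' keeps
	 * the equal-start case that '>' used to miss. */
	return (s1 >= s2 && s1 < e2) || (s2 >= s1 && s2 < e1);
}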
diff --git a/lib/librte_eal/linuxapp/eal/eal_log.c b/lib/librte_eal/linuxapp/eal/eal_log.c
index 0b133c3e..d3911004 100644
--- a/lib/librte_eal/linuxapp/eal/eal_log.c
+++ b/lib/librte_eal/linuxapp/eal/eal_log.c
@@ -50,8 +50,7 @@
#include "eal_private.h"
/*
- * default log function, used once mempool (hence log history) is
- * available
+ * default log function
*/
static ssize_t
console_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
@@ -60,9 +59,6 @@ console_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
ssize_t ret;
uint32_t loglevel;
- /* add this log in history */
- rte_log_add_in_history(buf, size);
-
/* write on stdout */
ret = fwrite(buf, 1, size, stdout);
fflush(stdout);
@@ -110,8 +106,7 @@ rte_eal_log_init(const char *id, int facility)
/* early logs */
/*
- * early log function, used during boot when mempool (hence log
- * history) is not available
+ * early log function, used before rte_eal_log_init
*/
static ssize_t
early_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 5b9132c6..5578c254 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -80,6 +80,8 @@
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/time.h>
+#include <signal.h>
+#include <setjmp.h>
#include <rte_log.h>
#include <rte_memory.h>
@@ -309,6 +311,22 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
return addr;
}
+static sigjmp_buf huge_jmpenv;
+
+static void huge_sigbus_handler(int signo __rte_unused)
+{
+ siglongjmp(huge_jmpenv, 1);
+}
+
+/* Wrap sigsetjmp in its own function to avoid a compiler warning (an error
+ * under -Werror): any non-volatile, non-static local variable in the stack
+ * frame that calls sigsetjmp might be clobbered by a later siglongjmp.
+ */
+static int huge_wrap_sigsetjmp(void)
+{
+ return sigsetjmp(huge_jmpenv, 1);
+}
+
/*
* Mmap all hugepages of the hugepage table: it first opens a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -316,7 +334,7 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
* in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
* map contiguous physical blocks in contiguous virtual blocks.
*/
-static int
+static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl,
struct hugepage_info *hpi, int orig)
{
@@ -394,9 +412,9 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
/* try to create hugepage file */
fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755);
if (fd < 0) {
- RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__,
+ RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
- return -1;
+ return i;
}
/* map the segment, and populate page tables,
@@ -404,10 +422,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, 0);
if (virtaddr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__,
+ RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
strerror(errno));
close(fd);
- return -1;
+ return i;
}
if (orig) {
@@ -417,12 +435,33 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
hugepg_tbl[i].final_va = virtaddr;
}
+ if (orig) {
+ /* In Linux, hugetlb limits such as cgroups are
+ * enforced at fault time rather than at mmap(), even
+ * with MAP_POPULATE; the kernel then sends a SIGBUS
+ * signal. To avoid being killed, save the stack
+ * environment here so that if SIGBUS is raised we can
+ * jump back and recover.
+ */
+ if (huge_wrap_sigsetjmp()) {
+ RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
+ "hugepages of size %u MB\n",
+ (unsigned)(hugepage_sz / 0x100000));
+ munmap(virtaddr, hugepage_sz);
+ close(fd);
+ unlink(hugepg_tbl[i].filepath);
+ return i;
+ }
+ *(int *)virtaddr = 0;
+ }
+
+
/* set shared flock on the file. */
if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
- RTE_LOG(ERR, EAL, "%s(): Locking file failed:%s \n",
+ RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
- return -1;
+ return i;
}
close(fd);
@@ -430,7 +469,8 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_addr = (char *)vma_addr + hugepage_sz;
vma_len -= hugepage_sz;
}
- return 0;
+
+ return i;
}
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
@@ -1036,6 +1076,51 @@ calc_num_pages_per_socket(uint64_t * memory,
return total_num_pages;
}
+static inline size_t
+eal_get_hugepage_mem_size(void)
+{
+ uint64_t size = 0;
+ unsigned i, j;
+
+ for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
+ struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ if (hpi->hugedir != NULL) {
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
+ size += hpi->hugepage_sz * hpi->num_pages[j];
+ }
+ }
+ }
+
+ return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
+}
+
+static struct sigaction huge_action_old;
+static int huge_need_recover;
+
+static void
+huge_register_sigbus(void)
+{
+ sigset_t mask;
+ struct sigaction action;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGBUS);
+ action.sa_flags = 0;
+ action.sa_mask = mask;
+ action.sa_handler = huge_sigbus_handler;
+
+ huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
+}
+
+static void
+huge_recover_sigbus(void)
+{
+ if (huge_need_recover) {
+ sigaction(SIGBUS, &huge_action_old, NULL);
+ huge_need_recover = 0;
+ }
+}
+
/*
* Prepare physical memory mapping: fill configuration structure with
* these infos, return 0 on success.
@@ -1122,8 +1207,11 @@ rte_eal_hugepage_init(void)
hp_offset = 0; /* where we start the current page size entries */
+ huge_register_sigbus();
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
+ unsigned pages_old, pages_new;
struct hugepage_info *hpi;
/*
@@ -1137,10 +1225,28 @@ rte_eal_hugepage_init(void)
continue;
/* map all hugepages available */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 1) < 0){
- RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n",
- (unsigned)(hpi->hugepage_sz / 0x100000));
+ pages_old = hpi->num_pages[0];
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ if (pages_new < pages_old) {
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+ RTE_LOG(ERR, EAL,
+ "%d not %d hugepages of size %u MB allocated\n",
+ pages_new, pages_old,
+ (unsigned)(hpi->hugepage_sz / 0x100000));
goto fail;
+#else
+ RTE_LOG(DEBUG, EAL,
+ "%d not %d hugepages of size %u MB allocated\n",
+ pages_new, pages_old,
+ (unsigned)(hpi->hugepage_sz / 0x100000));
+
+ int pages = pages_old - pages_new;
+
+ nr_hugepages -= pages;
+ hpi->num_pages[0] = pages_new;
+ if (pages_new == 0)
+ continue;
+#endif
}
/* find physical addresses and sockets for each hugepage */
@@ -1172,8 +1278,9 @@ rte_eal_hugepage_init(void)
hp_offset += new_pages_count[i];
#else
/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) < 0){
- RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ hpi->num_pages[0]) {
+ RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
goto fail;
}
@@ -1187,6 +1294,11 @@ rte_eal_hugepage_init(void)
#endif
}
+ huge_recover_sigbus();
+
+ if (internal_config.memory == 0 && internal_config.force_sockets == 0)
+ internal_config.memory = eal_get_hugepage_mem_size();
+
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
nr_hugefiles = 0;
for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
@@ -1373,6 +1485,7 @@ rte_eal_hugepage_init(void)
return 0;
fail:
+ huge_recover_sigbus();
free(tmp_hp);
return -1;
}
@@ -1399,7 +1512,7 @@ int
rte_eal_hugepage_attach(void)
{
const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- const struct hugepage_file *hp = NULL;
+ struct hugepage_file *hp = NULL;
unsigned num_hp = 0;
unsigned i, s = 0; /* s used to track the segment number */
off_t size;
@@ -1417,7 +1530,7 @@ rte_eal_hugepage_attach(void)
if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
if (rte_xen_dom0_memory_attach() < 0) {
- RTE_LOG(ERR, EAL,"Failed to attach memory setments of primay "
+ RTE_LOG(ERR, EAL, "Failed to attach memory segments of primary "
"process\n");
return -1;
}
@@ -1481,7 +1594,7 @@ rte_eal_hugepage_attach(void)
size = getFileSize(fd_hugepage);
hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
- if (hp == NULL) {
+ if (hp == MAP_FAILED) {
RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
goto error;
}
@@ -1545,12 +1658,19 @@ rte_eal_hugepage_attach(void)
s++;
}
/* unmap the hugepage config file, since we are done using it */
- munmap((void *)(uintptr_t)hp, size);
+ munmap(hp, size);
close(fd_zero);
close(fd_hugepage);
return 0;
error:
+ s = 0;
+ while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0) {
+ munmap(mcfg->memseg[s].addr, mcfg->memseg[s].len);
+ s++;
+ }
+ if (hp != NULL && hp != MAP_FAILED)
+ munmap(hp, size);
if (fd_zero >= 0)
close(fd_zero);
if (fd_hugepage >= 0)
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c
index dbf12a84..f9c3efd2 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci.c
@@ -66,8 +66,8 @@ pci_unbind_kernel_driver(struct rte_pci_device *dev)
/* open /sys/bus/pci/devices/AAAA:BB:CC.D/driver */
snprintf(filename, sizeof(filename),
- SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/driver/unbind",
- loc->domain, loc->bus, loc->devid, loc->function);
+ "%s/" PCI_PRI_FMT "/driver/unbind", pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid, loc->function);
f = fopen(filename, "w");
if (f == NULL) /* device was not bound */
@@ -190,12 +190,13 @@ pci_find_max_end_va(void)
return RTE_PTR_ADD(last->addr, last->len);
}
-/* parse the "resource" sysfs file */
-static int
-pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+int
+pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
{
- FILE *f;
- char buf[BUFSIZ];
union pci_resource_info {
struct {
char *phys_addr;
@@ -204,6 +205,31 @@ pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
};
char *ptrs[PCI_RESOURCE_FMT_NVAL];
} res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
+ RTE_LOG(ERR, EAL,
+ "%s(): bad resource format\n", __func__);
+ return -1;
+ }
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): bad resource format\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* parse the "resource" sysfs file */
+static int
+pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *f;
+ char buf[BUFSIZ];
int i;
uint64_t phys_addr, end_addr, flags;
@@ -220,21 +246,9 @@ pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
"%s(): cannot read resource\n", __func__);
goto error;
}
-
- if (rte_strsplit(buf, sizeof(buf), res_info.ptrs, 3, ' ') != 3) {
- RTE_LOG(ERR, EAL,
- "%s(): bad resource format\n", __func__);
+ if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
+ &end_addr, &flags) < 0)
goto error;
- }
- errno = 0;
- phys_addr = strtoull(res_info.phys_addr, NULL, 16);
- end_addr = strtoull(res_info.end_addr, NULL, 16);
- flags = strtoull(res_info.flags, NULL, 16);
- if (errno != 0) {
- RTE_LOG(ERR, EAL,
- "%s(): bad resource format\n", __func__);
- goto error;
- }
if (flags & IORESOURCE_MEM) {
dev->mem_resource[i].phys_addr = phys_addr;
@@ -306,6 +320,16 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
}
dev->id.subsystem_device_id = (uint16_t)tmp;
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0) {
+ free(dev);
+ return -1;
+ }
+ /* the least significant 24 bits are valid: class, subclass, program interface */
+ dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
/* get max_vfs */
dev->max_vfs = 0;
snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
@@ -453,7 +477,7 @@ rte_eal_pci_scan(void)
uint16_t domain;
uint8_t bus, devid, function;
- dir = opendir(SYSFS_PCI_DEVICES);
+ dir = opendir(pci_get_sysfs_path());
if (dir == NULL) {
RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
__func__, strerror(errno));
@@ -468,8 +492,8 @@ rte_eal_pci_scan(void)
&bus, &devid, &function) != 0)
continue;
- snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES,
- e->d_name);
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ pci_get_sysfs_path(), e->d_name);
if (pci_scan_one(dirname, domain, bus, devid, function) < 0)
goto error;
}
@@ -481,18 +505,6 @@ error:
return -1;
}
-#ifdef RTE_PCI_CONFIG
-/*
- * It is deprecated, all its configurations have been moved into
- * each PMD respectively.
- */
-void
-pci_config_space_set(__rte_unused struct rte_pci_device *dev)
-{
- RTE_LOG(DEBUG, EAL, "Nothing here, as it is deprecated\n");
-}
-#endif
-
/* Read PCI config space. */
int rte_eal_pci_read_config(const struct rte_pci_device *device,
void *buf, size_t len, off_t offset)
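
The refactoring above exposes the per-line parser so that the new ioport
code can reuse it. Each line of a sysfs "resource" file carries three hex
fields (start, end, flags); a minimal standalone equivalent of
pci_parse_one_sysfs_resource(), using sscanf instead of rte_strsplit:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int
parse_resource_line(const char *line, uint64_t *phys_addr,
		    uint64_t *end_addr, uint64_t *flags)
{
	if (sscanf(line, "%" SCNx64 " %" SCNx64 " %" SCNx64,
		   phys_addr, end_addr, flags) != 3)
		return -1;	/* bad resource format */
	return 0;
}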
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_init.h b/lib/librte_eal/linuxapp/eal/eal_pci_init.h
index 7011753d..f72a2548 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_init.h
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_init.h
@@ -36,12 +36,22 @@
#include "eal_vfio.h"
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
/*
* Helper function to map PCI resources right after hugepages in virtual memory
*/
extern void *pci_map_addr;
void *pci_find_max_end_va(void);
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+int pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags);
+
int pci_uio_alloc_resource(struct rte_pci_device *dev,
struct mapped_pci_resource **uio_res);
void pci_uio_free_resource(struct rte_pci_device *dev,
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_uio.c b/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
index 068694dc..1786b754 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
@@ -35,6 +35,7 @@
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
+#include <inttypes.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/pci_regs.h>
@@ -161,14 +162,14 @@ pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
* or uio:uioX */
snprintf(dirname, sizeof(dirname),
- SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
+ "%s/" PCI_PRI_FMT "/uio", pci_get_sysfs_path(),
loc->domain, loc->bus, loc->devid, loc->function);
dir = opendir(dirname);
if (dir == NULL) {
/* retry with the parent directory */
snprintf(dirname, sizeof(dirname),
- SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
+ "%s/" PCI_PRI_FMT, pci_get_sysfs_path(),
loc->domain, loc->bus, loc->devid, loc->function);
dir = opendir(dirname);
@@ -309,7 +310,7 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
struct mapped_pci_resource *uio_res, int map_idx)
{
int fd;
- char devname[PATH_MAX]; /* contains the /dev/uioX */
+ char devname[PATH_MAX];
void *mapaddr;
struct rte_pci_addr *loc;
struct pci_map *maps;
@@ -319,7 +320,8 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
/* update devname for mmap */
snprintf(devname, sizeof(devname),
- SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/resource%d",
+ "%s/" PCI_PRI_FMT "/resource%d",
+ pci_get_sysfs_path(),
loc->domain, loc->bus, loc->devid,
loc->function, res_idx);
@@ -368,11 +370,11 @@ error:
return -1;
}
+#if defined(RTE_ARCH_X86)
int
pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
struct rte_pci_ioport *p)
{
-#if defined(RTE_ARCH_X86)
char dirname[PATH_MAX];
char filename[PATH_MAX];
int uio_num;
@@ -411,81 +413,154 @@ pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%lx\n", start);
p->base = start;
+ p->len = 0;
return 0;
+}
#else
- RTE_SET_USED(dev);
- RTE_SET_USED(bar);
- RTE_SET_USED(p);
+int
+pci_uio_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char filename[PATH_MAX];
+ uint64_t phys_addr, end_addr, flags;
+ int fd, i;
+ void *addr;
+
+ /* open and read addresses of the corresponding resource in sysfs */
+ snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT "/resource",
+ pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function);
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot open sysfs resource: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ for (i = 0; i < bar + 1; i++) {
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot read sysfs resource\n");
+ goto error;
+ }
+ }
+ if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
+ &end_addr, &flags) < 0)
+ goto error;
+ if ((flags & IORESOURCE_IO) == 0) {
+ RTE_LOG(ERR, EAL, "BAR %d is not an IO resource\n", bar);
+ goto error;
+ }
+ snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT "/resource%d",
+ pci_get_sysfs_path(), dev->addr.domain, dev->addr.bus,
+ dev->addr.devid, dev->addr.function, bar);
+
+ /* mmap the pci resource */
+ fd = open(filename, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
+ strerror(errno));
+ goto error;
+ }
+ addr = mmap(NULL, end_addr + 1, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ close(fd);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Cannot mmap IO port resource: %s\n",
+ strerror(errno));
+ goto error;
+ }
+
+ /* strangely, the base address is mmap addr + phys_addr */
+ p->base = (uintptr_t)addr + phys_addr;
+ p->len = end_addr + 1;
+ RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%"PRIx64"\n", p->base);
+ fclose(f);
+
+ return 0;
+
+error:
+ fclose(f);
return -1;
-#endif
}
+#endif
void
pci_uio_ioport_read(struct rte_pci_ioport *p,
void *data, size_t len, off_t offset)
{
-#if defined(RTE_ARCH_X86)
uint8_t *d;
int size;
- unsigned short reg = p->base + offset;
+ uintptr_t reg = p->base + offset;
for (d = data; len > 0; d += size, reg += size, len -= size) {
if (len >= 4) {
size = 4;
+#if defined(RTE_ARCH_X86)
*(uint32_t *)d = inl(reg);
+#else
+ *(uint32_t *)d = *(volatile uint32_t *)reg;
+#endif
} else if (len >= 2) {
size = 2;
+#if defined(RTE_ARCH_X86)
*(uint16_t *)d = inw(reg);
+#else
+ *(uint16_t *)d = *(volatile uint16_t *)reg;
+#endif
} else {
size = 1;
+#if defined(RTE_ARCH_X86)
*d = inb(reg);
- }
- }
#else
- RTE_SET_USED(p);
- RTE_SET_USED(data);
- RTE_SET_USED(len);
- RTE_SET_USED(offset);
+ *d = *(volatile uint8_t *)reg;
#endif
+ }
+ }
}
void
pci_uio_ioport_write(struct rte_pci_ioport *p,
const void *data, size_t len, off_t offset)
{
-#if defined(RTE_ARCH_X86)
const uint8_t *s;
int size;
- unsigned short reg = p->base + offset;
+ uintptr_t reg = p->base + offset;
for (s = data; len > 0; s += size, reg += size, len -= size) {
if (len >= 4) {
size = 4;
+#if defined(RTE_ARCH_X86)
outl_p(*(const uint32_t *)s, reg);
+#else
+ *(volatile uint32_t *)reg = *(const uint32_t *)s;
+#endif
} else if (len >= 2) {
size = 2;
+#if defined(RTE_ARCH_X86)
outw_p(*(const uint16_t *)s, reg);
+#else
+ *(volatile uint16_t *)reg = *(const uint16_t *)s;
+#endif
} else {
size = 1;
+#if defined(RTE_ARCH_X86)
outb_p(*s, reg);
- }
- }
#else
- RTE_SET_USED(p);
- RTE_SET_USED(data);
- RTE_SET_USED(len);
- RTE_SET_USED(offset);
+ *(volatile uint8_t *)reg = *s;
#endif
+ }
+ }
}
int
pci_uio_ioport_unmap(struct rte_pci_ioport *p)
{
- RTE_SET_USED(p);
#if defined(RTE_ARCH_X86)
+ RTE_SET_USED(p);
/* FIXME close intr fd ? */
return 0;
#else
- return -1;
+ return munmap((void *)(uintptr_t)p->base, p->len);
#endif
}
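
With the non-x86 path above, p->base already points into the mmap'd BAR, so
reads and writes become plain volatile loads and stores, while on x86 the
same calls still go through the in*/out* port instructions. A short usage
sketch ('p' comes from pci_uio_ioport_map(); REG_OFFSET is illustrative):

uint32_t val;

/* read a 32-bit register at REG_OFFSET, then write it back */
pci_uio_ioport_read(p, &val, sizeof(val), REG_OFFSET);
pci_uio_ioport_write(p, &val, sizeof(val), REG_OFFSET);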
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
index 10266f8f..f91b9242 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
@@ -602,7 +602,7 @@ pci_vfio_get_group_no(const char *pci_addr, int *iommu_group_no)
/* try to find out IOMMU group for this device */
snprintf(linkname, sizeof(linkname),
- SYSFS_PCI_DEVICES "/%s/iommu_group", pci_addr);
+ "%s/%s/iommu_group", pci_get_sysfs_path(), pci_addr);
ret = readlink(linkname, filename, sizeof(filename));
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c b/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c
index d9188fde..d54ded88 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c
@@ -287,7 +287,10 @@ pci_vfio_mp_sync_thread(void __rte_unused * arg)
struct linger l;
l.l_onoff = 1;
l.l_linger = 60;
- setsockopt(conn_sock, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
+
+ if (setsockopt(conn_sock, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) < 0)
+ RTE_LOG(WARNING, EAL, "Cannot set SO_LINGER option "
+ "on listen socket (%s)\n", strerror(errno));
ret = vfio_mp_sync_receive_request(conn_sock);
@@ -396,7 +399,7 @@ pci_vfio_mp_sync_setup(void)
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "pci-vfio-sync");
ret = rte_thread_setname(socket_thread, thread_name);
if (ret)
- RTE_LOG(ERR, EAL,
+ RTE_LOG(DEBUG, EAL,
"Failed to set thread name for secondary processes!\n");
return 0;
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
index 18bd8e04..9f88530e 100644
--- a/lib/librte_eal/linuxapp/eal/eal_thread.c
+++ b/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -197,3 +197,16 @@ int rte_sys_gettid(void)
{
return (int)syscall(SYS_gettid);
}
+
+int rte_thread_setname(pthread_t id, const char *name)
+{
+ int ret = -1;
+#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
+#if __GLIBC_PREREQ(2, 12)
+ ret = pthread_setname_np(id, name);
+#endif
+#endif
+ RTE_SET_USED(id);
+ RTE_SET_USED(name);
+ return ret;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c
index f2abb7b6..afa32f5c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_timer.c
+++ b/lib/librte_eal/linuxapp/eal/eal_timer.c
@@ -222,8 +222,8 @@ rte_eal_hpet_init(int make_default)
snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "hpet-msb-inc");
ret = rte_thread_setname(msb_inc_thread_id, thread_name);
if (ret != 0)
- RTE_LOG(ERR, EAL,
- "ERROR: Cannot set HPET timer thread name!\n");
+ RTE_LOG(DEBUG, EAL,
+ "Cannot set HPET timer thread name!\n");
if (make_default)
eal_timer_source = EAL_TIMER_HPET;
diff --git a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
index 495eef9e..0b612bb1 100644
--- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
@@ -156,13 +156,27 @@ get_xen_memory_size(void)
* Calculate the MFN in Xen Dom0 based on the physical address.
*/
phys_addr_t
-rte_xen_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+rte_xen_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr)
{
- int mfn_id;
+ int mfn_id, i;
uint64_t mfn, mfn_offset;
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct rte_memseg *memseg = mcfg->memseg;
+ /* find the memory segment owning the physical address */
+ if (memseg_id == -1) {
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ if ((phy_addr >= memseg[i].phys_addr) &&
+ (phy_addr < memseg[i].phys_addr +
+ memseg[i].size)) {
+ memseg_id = i;
+ break;
+ }
+ }
+ if (memseg_id == -1)
+ return RTE_BAD_PHYS_ADDR;
+ }
+
mfn_id = (phy_addr - memseg[memseg_id].phys_addr) / RTE_PGSIZE_2M;
/* the MFN is contiguous within 2M */
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index 7e5e5984..2acdfd9b 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -113,7 +113,9 @@ struct rte_kni_mbuf {
void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
char pad0[10];
uint16_t data_off; /**< Start address of data in segment buffer. */
- char pad1[4];
+ char pad1[2];
+ uint8_t nb_segs; /**< Number of segments. */
+ char pad4[1];
uint64_t ol_flags; /**< Offload features. */
char pad2[4];
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
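
The pad shuffle above keeps struct rte_kni_mbuf byte-compatible with the
userspace rte_mbuf while exposing nb_segs, which the kernel side now needs
to walk multi-segment packets (see the kni_net.c changes below). No such
check exists in the tree, but an illustrative compile-time guard against
layout drift could look like:

#include <stddef.h>
/* plus exec-env/rte_kni_common.h and rte_mbuf.h for the two structs */

_Static_assert(offsetof(struct rte_kni_mbuf, ol_flags) ==
	       offsetof(struct rte_mbuf, ol_flags),
	       "KNI mbuf layout out of sync with rte_mbuf");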
diff --git a/lib/librte_eal/linuxapp/eal/rte_eal_version.map b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
index 12503efa..05134673 100644
--- a/lib/librte_eal/linuxapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
@@ -154,3 +154,13 @@ DPDK_16.04 {
rte_eal_primary_proc_alive;
} DPDK_2.2;
+
+DPDK_16.07 {
+ global:
+
+ pci_get_sysfs_path;
+ rte_keepalive_mark_sleep;
+ rte_keepalive_register_relay_callback;
+ rte_thread_setname;
+
+} DPDK_16.04;
diff --git a/lib/librte_eal/linuxapp/igb_uio/compat.h b/lib/librte_eal/linuxapp/igb_uio/compat.h
index c1d45a66..0d781e48 100644
--- a/lib/librte_eal/linuxapp/igb_uio/compat.h
+++ b/lib/librte_eal/linuxapp/igb_uio/compat.h
@@ -24,6 +24,15 @@
#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
#endif
+/*
+ * For kernels < 2.6.38 with a backported patch that moves the MSI-X entry
+ * definition to pci_regs.h. Those kernels have PCI_MSIX_ENTRY_SIZE defined
+ * but not PCI_MSIX_ENTRY_CTRL_MASKBIT.
+ */
+#ifndef PCI_MSIX_ENTRY_CTRL_MASKBIT
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+#endif
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34) && \
(!(defined(RHEL_RELEASE_CODE) && \
RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 9)))
diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
index 72b26923..45a5720e 100644
--- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
@@ -81,62 +81,10 @@ store_max_vfs(struct device *dev, struct device_attribute *attr,
return err ? err : count;
}
-#ifdef RTE_PCI_CONFIG
-static ssize_t
-show_extended_tag(struct device *dev, struct device_attribute *attr, char *buf)
-{
- dev_info(dev, "Deprecated\n");
-
- return 0;
-}
-
-static ssize_t
-store_extended_tag(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- dev_info(dev, "Deprecated\n");
-
- return 0;
-}
-
-static ssize_t
-show_max_read_request_size(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- dev_info(dev, "Deprecated\n");
-
- return 0;
-}
-
-static ssize_t
-store_max_read_request_size(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- dev_info(dev, "Deprecated\n");
-
- return 0;
-}
-#endif
-
static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
-#ifdef RTE_PCI_CONFIG
-static DEVICE_ATTR(extended_tag, S_IRUGO | S_IWUSR, show_extended_tag,
- store_extended_tag);
-static DEVICE_ATTR(max_read_request_size, S_IRUGO | S_IWUSR,
- show_max_read_request_size, store_max_read_request_size);
-#endif
static struct attribute *dev_attrs[] = {
&dev_attr_max_vfs.attr,
-#ifdef RTE_PCI_CONFIG
- &dev_attr_extended_tag.attr,
- &dev_attr_max_read_request_size.attr,
-#endif
NULL,
};
diff --git a/lib/librte_eal/linuxapp/kni/Makefile b/lib/librte_eal/linuxapp/kni/Makefile
index ac99d3f1..8cc6b61c 100644
--- a/lib/librte_eal/linuxapp/kni/Makefile
+++ b/lib/librte_eal/linuxapp/kni/Makefile
@@ -47,7 +47,7 @@ MODULE_CFLAGS += -Wall -Werror
ifeq ($(shell lsb_release -si 2>/dev/null),Ubuntu)
MODULE_CFLAGS += -DUBUNTU_RELEASE_CODE=$(shell lsb_release -sr | tr -d .)
UBUNTU_KERNEL_CODE := $(shell echo `grep UTS_RELEASE $(RTE_KERNELDIR)/include/generated/utsrelease.h \
- | cut -d '"' -f2 | cut -d- -f1,2 | tr .- $(comma)`,1)
+ | cut -d '"' -f2 | cut -d- -f1,2 | tr .- ,`,1)
MODULE_CFLAGS += -D"UBUNTU_KERNEL_CODE=UBUNTU_KERNEL_VERSION($(UBUNTU_KERNEL_CODE))"
endif
diff --git a/lib/librte_eal/linuxapp/kni/compat.h b/lib/librte_eal/linuxapp/kni/compat.h
index cf100b67..647ba3ce 100644
--- a/lib/librte_eal/linuxapp/kni/compat.h
+++ b/lib/librte_eal/linuxapp/kni/compat.h
@@ -14,16 +14,27 @@
#endif /* < 2.6.39 */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)
+#define HAVE_SIMPLIFIED_PERNET_OPERATIONS
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
#define sk_sleep(s) (s)->sk_sleep
+#endif
-#endif /* < 2.6.35 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+#define HAVE_CHANGE_CARRIER_CB
+#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
#define HAVE_IOV_ITER_MSGHDR
#endif
-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
#define HAVE_KIOCB_MSG_PARAM
-#endif /* < 4.1.0 */
+#define HAVE_REBUILD_HEADER
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+#define HAVE_TRANS_START_HELPER
+#endif
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c
index df224702..140a2a47 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c
@@ -3300,12 +3300,13 @@ s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
*data = E1000_READ_REG(hw, E1000_MPHY_DATA);
/* Disable access to mPHY if it was originally disabled */
- if (locked)
+ if (locked) {
ready = e1000_is_mphy_ready(hw);
if (!ready)
return -E1000_ERR_PHY;
E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
E1000_MPHY_DIS_ACCESS);
+ }
return E1000_SUCCESS;
}
@@ -3365,12 +3366,13 @@ s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
/* Disable access to mPHY if it was originally disabled */
- if (locked)
+ if (locked) {
ready = e1000_is_mphy_ready(hw);
if (!ready)
return -E1000_ERR_PHY;
E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
E1000_MPHY_DIS_ACCESS);
+ }
return E1000_SUCCESS;
}
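
The fix above is the classic unbraced-if hazard: the indentation suggested
the whole mPHY-disable sequence was conditional on 'locked', but only the
first statement was, so the ready check and register write always ran. A
reduced standalone illustration of the corrected shape:

#include <stdbool.h>

static int  is_ready(void)       { return 1; }	/* stand-ins */
static void disable_access(void) { }

static int
release_phy(bool locked)
{
	/* Braces keep all three statements under the condition. */
	if (locked) {
		if (!is_ready())
			return -1;
		disable_access();
	}
	return 0;
}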
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c
index 017dfe16..c6f4130d 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c
@@ -867,12 +867,13 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
(hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
+ }
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c
index 8c1d2fe3..92fc9fc7 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c
@@ -59,8 +59,6 @@
#undef CONFIG_DCA_MODULE
char ixgbe_driver_name[] = "ixgbe";
-static const char ixgbe_driver_string[] =
- "Intel(R) 10 Gigabit PCI Express Network Driver";
#define DRV_HW_PERF
#ifndef CONFIG_IXGBE_NAPI
@@ -79,8 +77,6 @@ static const char ixgbe_driver_string[] =
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) DRIVERNAPI DRV_HW_PERF FPGA VMDQ_TAG
const char ixgbe_driver_version[] = DRV_VERSION;
-static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2012 Intel Corporation.";
/* ixgbe_pci_tbl - PCI Device ID Table
*
diff --git a/lib/librte_eal/linuxapp/kni/kni_misc.c b/lib/librte_eal/linuxapp/kni/kni_misc.c
index ae8133f3..59d15ca6 100644
--- a/lib/librte_eal/linuxapp/kni/kni_misc.c
+++ b/lib/librte_eal/linuxapp/kni/kni_misc.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
@@ -34,6 +35,8 @@
#include <net/netns/generic.h>
#include <exec-env/rte_kni_common.h>
+
+#include "compat.h"
#include "kni_dev.h"
MODULE_LICENSE("Dual BSD/GPL");
@@ -104,7 +107,7 @@ struct kni_net {
static int __net_init kni_init_net(struct net *net)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
struct kni_net *knet = net_generic(net, kni_net_id);
#else
struct kni_net *knet;
@@ -115,7 +118,7 @@ static int __net_init kni_init_net(struct net *net)
ret = -ENOMEM;
return ret;
}
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) */
+#endif
/* Clear the bit of device in use */
clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);
@@ -123,7 +126,7 @@ static int __net_init kni_init_net(struct net *net)
init_rwsem(&knet->kni_list_lock);
INIT_LIST_HEAD(&knet->kni_list_head);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
return 0;
#else
ret = net_assign_generic(net, kni_net_id, knet);
@@ -131,25 +134,25 @@ static int __net_init kni_init_net(struct net *net)
kfree(knet);
return ret;
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) */
+#endif
}
static void __net_exit kni_exit_net(struct net *net)
{
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)
+#ifndef HAVE_SIMPLIFIED_PERNET_OPERATIONS
struct kni_net *knet = net_generic(net, kni_net_id);
kfree(knet);
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) */
+#endif
}
static struct pernet_operations kni_net_ops = {
.init = kni_init_net,
.exit = kni_exit_net,
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
.id = &kni_net_id,
.size = sizeof(struct kni_net),
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) */
+#endif
};
static int __init
@@ -164,11 +167,11 @@ kni_init(void)
return -EINVAL;
}
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
rc = register_pernet_subsys(&kni_net_ops);
#else
rc = register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) */
+#endif
if (rc)
return -EPERM;
@@ -186,11 +189,11 @@ kni_init(void)
return 0;
out:
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
unregister_pernet_subsys(&kni_net_ops);
#else
register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) */
+#endif
return rc;
}
@@ -198,11 +201,11 @@ static void __exit
kni_exit(void)
{
misc_deregister(&kni_misc);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
+#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
unregister_pernet_subsys(&kni_net_ops);
#else
register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32) */
+#endif
KNI_PRINT("####### DPDK kni module unloaded #######\n");
}
@@ -542,6 +545,15 @@ kni_ioctl_create(struct net *net,
if (pci)
pci_dev_put(pci);
+ if (kni->lad_dev)
+ memcpy(net_dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
+ else
+ /*
+ * Generate a random MAC address (eth_random_addr() is the
+ * newer kernel helper for doing this).
+ */
+ random_ether_addr(net_dev->dev_addr);
+
ret = register_netdev(net_dev);
if (ret) {
KNI_ERR("error %i registering device \"%s\"\n",
diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c
index cfa83398..fc82193a 100644
--- a/lib/librte_eal/linuxapp/kni/kni_net.c
+++ b/lib/librte_eal/linuxapp/kni/kni_net.c
@@ -38,6 +38,8 @@
#include <exec-env/rte_kni_common.h>
#include <kni_fifo.h>
+
+#include "compat.h"
#include "kni_dev.h"
#define WD_TIMEOUT 5 /*jiffies */
@@ -69,15 +71,6 @@ kni_net_open(struct net_device *dev)
struct rte_kni_request req;
struct kni_dev *kni = netdev_priv(dev);
- if (kni->lad_dev)
- memcpy(dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
- else
- /*
- * Generate random mac address. eth_random_addr() is the newer
- * version of generating mac address in linux kernel.
- */
- random_ether_addr(dev->dev_addr);
-
netif_start_queue(dev);
memset(&req, 0, sizeof(req));
@@ -156,7 +149,8 @@ kni_net_rx_normal(struct kni_dev *kni)
/* Transfer received packets to netif */
for (i = 0; i < num_rx; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->data_len;
+ len = kva->pkt_len;
+
data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
+ kni->mbuf_kva;
@@ -165,22 +159,41 @@ kni_net_rx_normal(struct kni_dev *kni)
KNI_ERR("Out of mem, dropping pkts\n");
/* Update statistics */
kni->stats.rx_dropped++;
+ continue;
}
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
+
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+
+ if (kva->nb_segs == 1) {
memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ int nb_segs;
+ int kva_nb_segs = kva->nb_segs;
- /* Call netif interface */
- netif_rx_ni(skb);
+ for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+ memcpy(skb_put(skb, kva->data_len),
+ data_kva, kva->data_len);
- /* Update statistics */
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
+ if (!kva->next)
+ break;
+
+ kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off
+ - kni->mbuf_va + kni->mbuf_kva;
+ }
}
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Call netif interface */
+ netif_rx_ni(skb);
+
+ /* Update statistics */
+ kni->stats.rx_bytes += len;
+ kni->stats.rx_packets++;
}
/* Burst enqueue mbufs into free_q */
@@ -317,7 +330,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
/* Copy mbufs to sk buffer and then call tx interface */
for (i = 0; i < num; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->data_len;
+ len = kva->pkt_len;
data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
kni->mbuf_kva;
@@ -338,20 +351,39 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
if (skb == NULL) {
KNI_ERR("Out of mem, dropping pkts\n");
kni->stats.rx_dropped++;
+ continue;
}
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
+
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+
+ if (kva->nb_segs == 1) {
memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ int nb_segs;
+ int kva_nb_segs = kva->nb_segs;
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
+ for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+ memcpy(skb_put(skb, kva->data_len),
+ data_kva, kva->data_len);
- /* call tx interface */
- kni_net_tx(skb, dev);
+ if (!kva->next)
+ break;
+
+ kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off
+ - kni->mbuf_va + kni->mbuf_kva;
+ }
}
+
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ kni->stats.rx_bytes += len;
+ kni->stats.rx_packets++;
+
+ /* call tx interface */
+ kni_net_tx(skb, dev);
}
/* enqueue all the mbufs from rx_q into free_q */
@@ -396,7 +428,12 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)
struct rte_kni_mbuf *pkt_kva = NULL;
struct rte_kni_mbuf *pkt_va = NULL;
- dev->trans_start = jiffies; /* save the timestamp */
+ /* save the timestamp */
+#ifdef HAVE_TRANS_START_HELPER
+ netif_trans_update(dev);
+#else
+ dev->trans_start = jiffies;
+#endif
/* Check if the length of skb is less than mbuf size */
if (skb->len > kni->mbuf_size)
@@ -604,7 +641,7 @@ kni_net_header(struct sk_buff *skb, struct net_device *dev,
/*
* Re-fill the eth header
*/
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+#ifdef HAVE_REBUILD_HEADER
static int
kni_net_rebuild_header(struct sk_buff *skb)
{
@@ -634,7 +671,7 @@ static int kni_net_set_mac(struct net_device *netdev, void *p)
return 0;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+#ifdef HAVE_CHANGE_CARRIER_CB
static int kni_net_change_carrier(struct net_device *dev, bool new_carrier)
{
if (new_carrier)
@@ -647,7 +684,7 @@ static int kni_net_change_carrier(struct net_device *dev, bool new_carrier)
static const struct header_ops kni_net_header_ops = {
.create = kni_net_header,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
+#ifdef HAVE_REBUILD_HEADER
.rebuild = kni_net_rebuild_header,
#endif /* < 4.1.0 */
.cache = NULL, /* disable caching */
@@ -664,7 +701,7 @@ static const struct net_device_ops kni_net_netdev_ops = {
.ndo_get_stats = kni_net_stats,
.ndo_tx_timeout = kni_net_tx_timeout,
.ndo_set_mac_address = kni_net_set_mac,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+#ifdef HAVE_CHANGE_CARRIER_CB
.ndo_change_carrier = kni_net_change_carrier,
#endif
};
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
index e8102846..0bb5dc90 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ether/Makefile
@@ -41,7 +41,7 @@ CFLAGS += $(WERROR_FLAGS)
EXPORT_MAP := rte_ether_version.map
-LIBABIVER := 3
+LIBABIVER := 4
SRCS-y += rte_ethdev.c
diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ether/rte_eth_ctrl.h
index b8c7be90..c3a2c9e4 100644
--- a/lib/librte_ether/rte_eth_ctrl.h
+++ b/lib/librte_ether/rte_eth_ctrl.h
@@ -74,7 +74,12 @@ extern "C" {
#define RTE_ETH_FLOW_IPV6_EX 15
#define RTE_ETH_FLOW_IPV6_TCP_EX 16
#define RTE_ETH_FLOW_IPV6_UDP_EX 17
-#define RTE_ETH_FLOW_MAX 18
+#define RTE_ETH_FLOW_PORT 18
+ /**< Consider device port number as a flow differentiator */
+#define RTE_ETH_FLOW_VXLAN 19 /**< VXLAN protocol based flow */
+#define RTE_ETH_FLOW_GENEVE 20 /**< GENEVE protocol based flow */
+#define RTE_ETH_FLOW_NVGRE 21 /**< NVGRE protocol based flow */
+#define RTE_ETH_FLOW_MAX 22
/**
* Feature filter types
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index a31018e8..eac260f1 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -77,6 +77,12 @@ static uint8_t nb_ports;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+/* spinlock for add/remove rx callbacks */
+static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* spinlock for add/remove tx callbacks */
+static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
/* store statistics names and its offset in stats structure */
struct rte_eth_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -369,8 +375,7 @@ rte_eth_dev_is_valid_port(uint8_t port_id)
int
rte_eth_dev_socket_id(uint8_t port_id)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return -1;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
return rte_eth_devices[port_id].data->numa_node;
}
@@ -383,8 +388,7 @@ rte_eth_dev_count(void)
static enum rte_eth_dev_type
rte_eth_dev_get_device_type(uint8_t port_id)
{
- if (!rte_eth_dev_is_valid_port(port_id))
- return RTE_ETH_DEV_UNKNOWN;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, RTE_ETH_DEV_UNKNOWN);
return rte_eth_devices[port_id].dev_type;
}
@@ -402,7 +406,7 @@ rte_eth_dev_get_addr_by_port(uint8_t port_id, struct rte_pci_addr *addr)
return 0;
}
-static int
+int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
char *tmp;
@@ -421,7 +425,7 @@ rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
return 0;
}
-static int
+int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
int i;
@@ -479,10 +483,7 @@ rte_eth_dev_is_detachable(uint8_t port_id)
{
uint32_t dev_flags;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -EINVAL;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
switch (rte_eth_devices[port_id].data->kdrv) {
case RTE_KDRV_IGB_UIO:
@@ -1507,9 +1508,85 @@ rte_eth_stats_reset(uint8_t port_id)
dev->data->rx_mbuf_alloc_failed = 0;
}
+static int
+get_xstats_count(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int count;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
+ if (dev->dev_ops->xstats_get_names != NULL) {
+ count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
+ if (count < 0)
+ return count;
+ } else
+ count = 0;
+ count += RTE_NB_STATS;
+ count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
+ count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
+ return count;
+}
+
+int
+rte_eth_xstats_get_names(uint8_t port_id,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned size)
+{
+ struct rte_eth_dev *dev;
+ int cnt_used_entries;
+ int cnt_expected_entries;
+ uint32_t idx, id_queue;
+
+ cnt_expected_entries = get_xstats_count(port_id);
+ if (xstats_names == NULL || cnt_expected_entries < 0 ||
+ (int)size < cnt_expected_entries)
+ return cnt_expected_entries;
+
+ /* port_id checked in get_xstats_count() */
+ dev = &rte_eth_devices[port_id];
+ if (dev->dev_ops->xstats_get_names != NULL) {
+ cnt_used_entries = (*dev->dev_ops->xstats_get_names)(
+ dev, xstats_names, size);
+ if (cnt_used_entries < 0)
+ return cnt_used_entries;
+ } else
+ /* The driver itself does not support extended stats, but
+ * the basic stats are still available.
+ */
+ cnt_used_entries = 0;
+
+ for (idx = 0; idx < RTE_NB_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "%s", rte_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ for (id_queue = 0; id_queue < dev->data->nb_rx_queues; id_queue++) {
+ for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "rx_q%u%s",
+ id_queue, rte_rxq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+
+ }
+ for (id_queue = 0; id_queue < dev->data->nb_tx_queues; id_queue++) {
+ for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "tx_q%u%s",
+ id_queue, rte_txq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ }
+ return cnt_used_entries;
+}
+
/* retrieve ethdev extended statistics */
int
-rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
+rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
unsigned n)
{
struct rte_eth_stats eth_stats;
@@ -1551,8 +1628,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
stats_ptr = RTE_PTR_ADD(&eth_stats,
rte_stats_strings[i].offset);
val = *stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "%s", rte_stats_strings[i].name);
+ xstats[count].id = count + xcount;
xstats[count++].value = val;
}
@@ -1563,9 +1639,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
rte_rxq_stats_strings[i].offset +
q * sizeof(uint64_t));
val = *stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_q%u_%s", q,
- rte_rxq_stats_strings[i].name);
+ xstats[count].id = count + xcount;
xstats[count++].value = val;
}
}
@@ -1577,9 +1651,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
rte_txq_stats_strings[i].offset +
q * sizeof(uint64_t));
val = *stats_ptr;
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_q%u_%s", q,
- rte_txq_stats_strings[i].name);
+ xstats[count].id = count + xcount;
xstats[count++].value = val;
}
}
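
The xstats rework above means names and values are now fetched separately
and correlated through the new 'id' field instead of a per-entry name
string. A usage sketch (error handling trimmed; 'id' is taken as an index
into the names array, which is how the code above fills it in):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void
dump_xstats(uint8_t port_id)
{
	int i, n, got;
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *vals;

	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	vals = calloc(n, sizeof(*vals));
	rte_eth_xstats_get_names(port_id, names, n);
	got = rte_eth_xstats_get(port_id, vals, n);
	for (i = 0; i < got; i++)
		printf("%s = %" PRIu64 "\n",
		       names[vals[i].id].name, vals[i].value);
	free(names);
	free(vals);
}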
@@ -1639,7 +1711,6 @@ rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
STAT_QMAP_RX);
}
-
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
@@ -1661,6 +1732,8 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->pci_dev = dev->pci_dev;
dev_info->driver_name = dev->data->drv_name;
+ dev_info->nb_rx_queues = dev->data->nb_rx_queues;
+ dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}
int
@@ -1994,10 +2067,7 @@ rte_eth_dev_rss_reta_query(uint8_t port_id,
struct rte_eth_dev *dev;
int ret;
- if (port_id >= nb_ports) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
/* Check mask bits */
ret = rte_eth_check_reta_mask(reta_conf, reta_size);
@@ -2641,10 +2711,7 @@ rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
uint16_t qid;
int rc;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
intr_handle = &dev->pci_dev->intr_handle;
@@ -2699,10 +2766,7 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
struct rte_intr_handle *intr_handle;
int rc;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
@@ -2734,10 +2798,7 @@ rte_eth_dev_rx_intr_enable(uint8_t port_id,
{
struct rte_eth_dev *dev;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2751,10 +2812,7 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id,
{
struct rte_eth_dev *dev;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2925,7 +2983,6 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
rte_errno = EINVAL;
return NULL;
}
-
struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
if (cb == NULL) {
@@ -2936,6 +2993,7 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
cb->fn.rx = fn;
cb->param = user_param;
+ rte_spinlock_lock(&rte_eth_rx_cb_lock);
/* Add the callbacks in fifo order. */
struct rte_eth_rxtx_callback *tail =
rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
@@ -2948,6 +3006,42 @@ rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
tail = tail->next;
tail->next = cb;
}
+ rte_spinlock_unlock(&rte_eth_rx_cb_lock);
+
+ return cb;
+}
+
+void *
+rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
+ rte_rx_callback_fn fn, void *user_param)
+{
+#ifndef RTE_ETHDEV_RXTX_CALLBACKS
+ rte_errno = ENOTSUP;
+ return NULL;
+#endif
+ /* check input parameters */
+ if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
+
+ if (cb == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ cb->fn.rx = fn;
+ cb->param = user_param;
+
+ rte_spinlock_lock(&rte_eth_rx_cb_lock);
+	/* Add the callback at the first position */
+ cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
+ rte_smp_wmb();
+ rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
+ rte_spinlock_unlock(&rte_eth_rx_cb_lock);
return cb;
}
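
A sketch of how the new prepend API might be used, e.g. to count packets before any other callback runs; the port/queue numbers, counter, and helper name are illustrative:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

static uint64_t rx_seen;

static uint16_t
count_cb(uint8_t port __rte_unused, uint16_t queue __rte_unused,
	 struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
	 uint16_t max_pkts __rte_unused, void *user_param)
{
	uint64_t *cnt = user_param;

	*cnt += nb_pkts;
	return nb_pkts;	/* pass the burst through unmodified */
}

static int
install_counter(uint8_t port_id, uint16_t queue_id)
{
	void *cb = rte_eth_add_first_rx_callback(port_id, queue_id,
						 count_cb, &rx_seen);

	/* the returned handle is later passed to
	 * rte_eth_remove_rx_callback() */
	return cb == NULL ? -rte_errno : 0;
}
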
@@ -2977,6 +3071,7 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
cb->fn.tx = fn;
cb->param = user_param;
+ rte_spinlock_lock(&rte_eth_tx_cb_lock);
/* Add the callbacks in fifo order. */
struct rte_eth_rxtx_callback *tail =
rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
@@ -2989,6 +3084,7 @@ rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
tail = tail->next;
tail->next = cb;
}
+ rte_spinlock_unlock(&rte_eth_tx_cb_lock);
return cb;
}
@@ -3001,35 +3097,30 @@ rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
return -ENOTSUP;
#endif
/* Check input parameters. */
- if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
- queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ if (user_cb == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
return -EINVAL;
- }
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
- struct rte_eth_rxtx_callback *prev_cb;
-
- /* Reset head pointer and remove user cb if first in the list. */
- if (cb == user_cb) {
- dev->post_rx_burst_cbs[queue_id] = user_cb->next;
- return 0;
- }
-
- /* Remove the user cb from the callback list. */
- do {
- prev_cb = cb;
- cb = cb->next;
-
+ struct rte_eth_rxtx_callback *cb;
+ struct rte_eth_rxtx_callback **prev_cb;
+ int ret = -EINVAL;
+
+ rte_spinlock_lock(&rte_eth_rx_cb_lock);
+ prev_cb = &dev->post_rx_burst_cbs[queue_id];
+ for (; *prev_cb != NULL; prev_cb = &cb->next) {
+ cb = *prev_cb;
if (cb == user_cb) {
- prev_cb->next = user_cb->next;
- return 0;
+ /* Remove the user cb from the callback list. */
+ *prev_cb = cb->next;
+ ret = 0;
+ break;
}
+ }
+ rte_spinlock_unlock(&rte_eth_rx_cb_lock);
- } while (cb != NULL);
-
- /* Callback wasn't found. */
- return -EINVAL;
+ return ret;
}
int
@@ -3040,35 +3131,30 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
return -ENOTSUP;
#endif
/* Check input parameters. */
- if (!rte_eth_dev_is_valid_port(port_id) || user_cb == NULL ||
- queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ if (user_cb == NULL ||
+ queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
return -EINVAL;
- }
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
- struct rte_eth_rxtx_callback *prev_cb;
-
- /* Reset head pointer and remove user cb if first in the list. */
- if (cb == user_cb) {
- dev->pre_tx_burst_cbs[queue_id] = user_cb->next;
- return 0;
- }
-
- /* Remove the user cb from the callback list. */
- do {
- prev_cb = cb;
- cb = cb->next;
-
+ int ret = -EINVAL;
+ struct rte_eth_rxtx_callback *cb;
+ struct rte_eth_rxtx_callback **prev_cb;
+
+ rte_spinlock_lock(&rte_eth_tx_cb_lock);
+ prev_cb = &dev->pre_tx_burst_cbs[queue_id];
+ for (; *prev_cb != NULL; prev_cb = &cb->next) {
+ cb = *prev_cb;
if (cb == user_cb) {
- prev_cb->next = user_cb->next;
- return 0;
+ /* Remove the user cb from the callback list. */
+ *prev_cb = cb->next;
+ ret = 0;
+ break;
}
+ }
+ rte_spinlock_unlock(&rte_eth_tx_cb_lock);
- } while (cb != NULL);
-
- /* Callback wasn't found. */
- return -EINVAL;
+ return ret;
}
int
@@ -3284,10 +3370,7 @@ rte_eth_dev_get_dcb_info(uint8_t port_id,
{
struct rte_eth_dev *dev;
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -ENODEV;
- }
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 022733ec..0f173231 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -102,7 +102,7 @@
* rte_eth_dev_configure(), rte_eth_tx_queue_setup(), or
* rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the
* device and then do the reconfiguration before calling rte_eth_dev_start()
- * again. The tramsit and receive functions should not be invoked when the
+ * again. The transmit and receive functions should not be invoked when the
* device is stopped.
*
* Please note that some configuration is not stored between calls to
@@ -200,27 +200,9 @@ struct rte_eth_stats {
/**< Total of RX packets dropped by the HW,
* because there are no available mbufs (i.e. RX queues are full).
*/
- uint64_t ibadcrc __rte_deprecated;
- /**< Deprecated; Total of RX packets with CRC error. */
- uint64_t ibadlen __rte_deprecated;
- /**< Deprecated; Total of RX packets with bad length. */
uint64_t ierrors; /**< Total number of erroneous received packets. */
uint64_t oerrors; /**< Total number of failed transmitted packets. */
- uint64_t imcasts;
- /**< Deprecated; Total number of multicast received packets. */
uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
- uint64_t fdirmatch __rte_deprecated;
- /**< Deprecated; Total number of RX packets matching a filter. */
- uint64_t fdirmiss __rte_deprecated;
- /**< Deprecated; Total number of RX packets not matching any filter. */
- uint64_t tx_pause_xon __rte_deprecated;
- /**< Deprecated; Total nb. of XON pause frame sent. */
- uint64_t rx_pause_xon __rte_deprecated;
- /**< Deprecated; Total nb. of XON pause frame received. */
- uint64_t tx_pause_xoff __rte_deprecated;
- /**< Deprecated; Total nb. of XOFF pause frame sent. */
- uint64_t rx_pause_xoff __rte_deprecated;
- /**< Deprecated; Total nb. of XOFF pause frame received. */
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/**< Total number of queue RX packets. */
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
@@ -231,14 +213,6 @@ struct rte_eth_stats {
/**< Total number of successfully transmitted queue bytes. */
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
/**< Total number of queue packets received that are dropped. */
- uint64_t ilbpackets;
- /**< Total number of good packets received from loopback,VF Only */
- uint64_t olbpackets;
- /**< Total number of good packets transmitted to loopback,VF Only */
- uint64_t ilbbytes;
- /**< Total number of good bytes received from loopback,VF Only */
- uint64_t olbbytes;
- /**< Total number of good bytes transmitted to loopback,VF Only */
};
/**
@@ -389,8 +363,8 @@ struct rte_eth_rxmode {
*/
enum rte_vlan_type {
ETH_VLAN_TYPE_UNKNOWN = 0,
- ETH_VLAN_TYPE_INNER, /**< Single VLAN, or inner VLAN. */
- ETH_VLAN_TYPE_OUTER, /**< Outer VLAN. */
+ ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
+ ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
ETH_VLAN_TYPE_MAX,
};
@@ -439,6 +413,10 @@ struct rte_eth_rss_conf {
#define ETH_RSS_IPV6_EX (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
+#define ETH_RSS_PORT (1ULL << RTE_ETH_FLOW_PORT)
+#define ETH_RSS_VXLAN (1ULL << RTE_ETH_FLOW_VXLAN)
+#define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
+#define ETH_RSS_NVGRE (1ULL << RTE_ETH_FLOW_NVGRE)
#define ETH_RSS_IP ( \
ETH_RSS_IPV4 | \
@@ -463,6 +441,12 @@ struct rte_eth_rss_conf {
ETH_RSS_NONFRAG_IPV4_SCTP | \
ETH_RSS_NONFRAG_IPV6_SCTP)
+#define ETH_RSS_TUNNEL ( \
+ ETH_RSS_VXLAN | \
+ ETH_RSS_GENEVE | \
+ ETH_RSS_NVGRE)
+
+
/**< Mask of valid RSS hash protocols */
#define ETH_RSS_PROTO_MASK ( \
ETH_RSS_IPV4 | \
@@ -480,7 +464,11 @@ struct rte_eth_rss_conf {
ETH_RSS_L2_PAYLOAD | \
ETH_RSS_IPV6_EX | \
ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV6_UDP_EX)
+ ETH_RSS_IPV6_UDP_EX | \
+ ETH_RSS_PORT | \
+ ETH_RSS_VXLAN | \
+ ETH_RSS_GENEVE | \
+ ETH_RSS_NVGRE)
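
With the new tunnel flow types, inner-header RSS for encapsulated traffic can be requested at configure time, assuming the PMD advertises support. A minimal sketch:

#include <rte_ethdev.h>

struct rte_eth_conf port_conf = {
	.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* keep the default key */
			.rss_hf = ETH_RSS_IP | ETH_RSS_TUNNEL,
		},
	},
};
/* then: rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf); */
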
/*
* Definitions used for redirection table entry size.
@@ -489,6 +477,7 @@ struct rte_eth_rss_conf {
*/
#define ETH_RSS_RETA_SIZE_64 64
#define ETH_RSS_RETA_SIZE_128 128
+#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE 64
@@ -908,6 +897,9 @@ struct rte_eth_dev_info {
struct rte_eth_desc_lim rx_desc_lim; /**< RX descriptors limits */
struct rte_eth_desc_lim tx_desc_lim; /**< TX descriptors limits */
uint32_t speed_capa; /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+ /** Configured number of rx/tx queues */
+ uint16_t nb_rx_queues; /**< Number of RX queues. */
+ uint16_t nb_tx_queues; /**< Number of TX queues. */
};
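
Since dev_info now reports the configured queue counts, per-queue bookkeeping can be sized directly from it instead of being tracked separately. A sketch, assuming a configured port:

#include <stdlib.h>
#include <rte_ethdev.h>

static uint64_t *
alloc_rxq_counters(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* one counter per configured RX queue, no separate bookkeeping */
	return calloc(dev_info.nb_rx_queues, sizeof(uint64_t));
}
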
/**
@@ -940,11 +932,21 @@ struct rte_eth_txq_info {
* statistics that are not provided in the generic rte_eth_stats
* structure.
*/
-struct rte_eth_xstats {
- char name[RTE_ETH_XSTATS_NAME_SIZE];
+struct rte_eth_xstat {
+ uint64_t id;
uint64_t value;
};
+/**
+ * A name-key lookup element for extended statistics.
+ *
+ * This structure is used to map between names and ID numbers
+ * for extended Ethernet statistics.
+ */
+struct rte_eth_xstat_name {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+};
+
#define ETH_DCB_NUM_TCS 8
#define ETH_MAX_VMDQ_POOL 64
@@ -1074,12 +1076,16 @@ typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
/**< @internal Reset global I/O statistics of an Ethernet device to 0. */
typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
- struct rte_eth_xstats *stats, unsigned n);
+ struct rte_eth_xstat *stats, unsigned n);
/**< @internal Get extended stats of an Ethernet device. */
typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
/**< @internal Reset extended stats of an Ethernet device. */
+typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned size);
+/**< @internal Get names of extended stats of an Ethernet device. */
+
typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
@@ -1150,7 +1156,7 @@ typedef int (*vlan_filter_set_t)(struct rte_eth_dev *dev,
typedef int (*vlan_tpid_set_t)(struct rte_eth_dev *dev,
enum rte_vlan_type type, uint16_t tpid);
-/**< @internal set the outer VLAN-TPID by an Ethernet device. */
+/**< @internal set the outer/inner VLAN-TPID by an Ethernet device. */
typedef void (*vlan_offload_set_t)(struct rte_eth_dev *dev, int mask);
/**< @internal set VLAN offload function by an Ethernet device. */
@@ -1427,6 +1433,8 @@ struct eth_dev_ops {
eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */
eth_xstats_get_t xstats_get; /**< Get extended device statistics. */
eth_xstats_reset_t xstats_reset; /**< Reset extended device statistics. */
+ eth_xstats_get_names_t xstats_get_names;
+ /**< Get names of extended statistics. */
eth_queue_stats_mapping_set_t queue_stats_mapping_set;
/**< Configure per queue stat counter mapping. */
eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
@@ -1434,7 +1442,7 @@ struct eth_dev_ops {
/**< Get packet types supported and identified by device*/
mtu_set_t mtu_set; /**< Set MTU. */
vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
- vlan_tpid_set_t vlan_tpid_set; /**< Outer VLAN TPID Setup. */
+ vlan_tpid_set_t vlan_tpid_set; /**< Outer/Inner VLAN TPID Setup. */
vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */
vlan_offload_set_t vlan_offload_set; /**< Set VLAN Offload. */
vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion */
@@ -1641,7 +1649,7 @@ struct rte_eth_dev {
struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
uint8_t attached; /**< Flag indicating the port is attached */
enum rte_eth_dev_type dev_type; /**< Flag indicating the device type */
-};
+} __rte_cache_aligned;
struct rte_eth_dev_sriov {
uint8_t active; /**< SRIOV is active with 16, 32 or 64 pools */
@@ -2015,7 +2023,7 @@ int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-/*
+/**
* Return the NUMA socket to which an Ethernet device is connected
*
* @param port_id
@@ -2027,7 +2035,7 @@ int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
*/
int rte_eth_dev_socket_id(uint8_t port_id);
-/*
+/**
* Check if port_id of device is attached
*
* @param port_id
@@ -2038,7 +2046,7 @@ int rte_eth_dev_socket_id(uint8_t port_id);
*/
int rte_eth_dev_is_valid_port(uint8_t port_id);
-/*
+/**
* Allocate mbuf from mempool, setup the DMA physical address
* and then start RX for specified queue of a port. It is used
* when rx_deferred_start flag of the specified queue is true.
@@ -2056,7 +2064,7 @@ int rte_eth_dev_is_valid_port(uint8_t port_id);
*/
int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
-/*
+/**
* Stop specified RX queue of a port
*
* @param port_id
@@ -2072,7 +2080,7 @@ int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
*/
int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
-/*
+/**
* Start TX for specified queue of a port. It is used when tx_deferred_start
* flag of the specified queue is true.
*
@@ -2089,7 +2097,7 @@ int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
*/
int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
-/*
+/**
* Stop specified TX queue of a port
*
* @param port_id
@@ -2279,13 +2287,36 @@ int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
void rte_eth_stats_reset(uint8_t port_id);
/**
+ * Retrieve names of extended statistics of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param xstats_names
+ * Block of memory to insert names into. Must have capacity for at least
+ * "size" entries. If set to NULL, the function returns the required
+ * capacity.
+ * @param size
+ * Capacity of xstats_names (number of names).
+ * @return
+ * - positive value less than or equal to size: success. The return value
+ * is the number of entries filled in the stats table.
+ * - positive value greater than size: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * - negative value on error (invalid port id)
+ */
+int rte_eth_xstats_get_names(uint8_t port_id,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned size);
+
+/**
* Retrieve extended statistics of an Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
* @param xstats
- * A pointer to a table of structure of type *rte_eth_xstats*
- * to be filled with device statistics names and values.
+ * A pointer to a table of structure of type *rte_eth_xstat*
+ * to be filled with device statistics ids and values.
* This parameter can be set to NULL if n is 0.
* @param n
* The size of the stats table, which should be large enough to store
@@ -2299,7 +2330,7 @@ void rte_eth_stats_reset(uint8_t port_id);
* shall not be used by the caller.
* - negative value on error (invalid port id)
*/
-int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats,
+int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
unsigned n);
/**
@@ -2376,6 +2407,21 @@ void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
/**
* Retrieve the supported packet types of an Ethernet device.
*
+ * When a packet type is announced as supported, it *must* be recognized by
+ * the PMD. For instance, if RTE_PTYPE_L2_ETHER, RTE_PTYPE_L2_ETHER_VLAN
+ * and RTE_PTYPE_L3_IPV4 are announced, the PMD must return the following
+ * packet types for these packets:
+ * - Ether/IPv4 -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
+ * - Ether/Vlan/IPv4 -> RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4
+ * - Ether/[anything else] -> RTE_PTYPE_L2_ETHER
+ * - Ether/Vlan/[anything else] -> RTE_PTYPE_L2_ETHER_VLAN
+ *
+ * When a packet is received by a PMD, the most precise type must be
+ * returned among the ones supported. However, a PMD is allowed to set a
+ * packet type that is not in the supported list, provided that it is
+ * more precise. Therefore, a PMD announcing no supported packet types
+ * can still set a matching packet type in a received packet.
+ *
* @note
* Better to invoke this API after the device is started or the rx burst
* function is decided, to obtain the correct supported ptypes.
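
A sketch of querying the guaranteed L3 types under this contract (error handling trimmed):

#include <stdlib.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
show_l3_ptypes(uint8_t port_id)
{
	uint32_t mask = RTE_PTYPE_L3_MASK;
	int n = rte_eth_dev_get_supported_ptypes(port_id, mask, NULL, 0);

	if (n > 0) {
		uint32_t *ptypes = malloc(sizeof(*ptypes) * n);

		if (ptypes != NULL) {
			n = rte_eth_dev_get_supported_ptypes(port_id, mask,
							     ptypes, n);
			/* ptypes[0..n-1]: L3 types the PMD must report */
			free(ptypes);
		}
	}
}
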
@@ -2424,6 +2470,7 @@ int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
* - (-ENOTSUP) if operation is not supported.
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if *mtu* invalid.
+ * - (-EBUSY) if operation is not allowed when the port is running
*/
int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
@@ -2709,7 +2756,8 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
* on the output queue *queue_id* of the Ethernet device designated by its
* *port_id*.
* The *nb_pkts* parameter is the number of packets to send which are
- * supplied in the *tx_pkts* array of *rte_mbuf* structures.
+ * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
+ * allocated from a pool created with rte_pktmbuf_pool_create().
* The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
* up to the number of transmit descriptors available in the TX ring of the
* transmit queue.
@@ -3851,6 +3899,34 @@ int rte_eth_dev_get_dcb_info(uint8_t port_id,
void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
+/**
+* Add a callback that must be called first on packet RX on a given port
+* and queue.
+*
+* This API configures a first function to be called for each burst of
+* packets received on a given NIC port queue. The return value is a pointer
+* that can be used to later remove the callback using
+* rte_eth_remove_rx_callback().
+*
+* Multiple functions are called in the order that they are added.
+*
+* @param port_id
+* The port identifier of the Ethernet device.
+* @param queue_id
+* The queue on the Ethernet device on which the callback is to be added.
+* @param fn
+* The callback function
+* @param user_param
+* A generic pointer parameter which will be passed to each invocation of the
+* callback function on this port and queue.
+*
+* @return
+* NULL on error.
+* On success, a pointer value which can later be used to remove the callback.
+*/
+void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
+ rte_rx_callback_fn fn, void *user_param);
+
/**
* Add a callback to be called on packet TX on a given port and queue.
*
@@ -3984,7 +4060,7 @@ int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
-/*
+/**
* Retrieve number of available registers for access
*
* @param port_id
@@ -4279,6 +4355,35 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
uint32_t mask,
uint8_t en);
+/**
+* Get the port id from the pci address or device name
+* e.g. 0000:2:00.0, or vdev name eth_pcap0
+*
+* @param name
+* pci address or name of the device
+* @param port_id
+* pointer to port identifier of the device
+* @return
+* - (0) if successful.
+* - (-ENODEV or -EINVAL) on failure.
+*/
+int
+rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
+
+/**
+* Get the device name from port id
+*
+* @param port_id
+* port identifier of the device
+* @param name
+* buffer to receive the pci address or name of the device
+* @return
+* - (0) if successful.
+* - (-EINVAL) on failure.
+*/
+int
+rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
+
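
A sketch of a round trip through the two new lookup functions; the name buffer size assumes the RTE_ETH_NAME_MAX_LEN definition:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_port(const char *devname)
{
	uint8_t port_id;
	char name[RTE_ETH_NAME_MAX_LEN];	/* assumed size macro */

	if (rte_eth_dev_get_port_by_name(devname, &port_id) == 0 &&
	    rte_eth_dev_get_name_by_port(port_id, name) == 0)
		printf("port %u is %s\n", port_id, name);
}

Callable with either form, e.g. show_port("0000:2:00.0") or show_port("eth_pcap0").
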
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map
index 214ecc73..e1ccebe0 100644
--- a/lib/librte_ether/rte_ether_version.map
+++ b/lib/librte_ether/rte_ether_version.map
@@ -66,7 +66,6 @@ DPDK_2.2 {
rte_eth_dev_set_vf_rxmode;
rte_eth_dev_set_vf_tx;
rte_eth_dev_set_vf_vlan_filter;
- rte_eth_dev_set_vlan_ether_type;
rte_eth_dev_set_vlan_offload;
rte_eth_dev_set_vlan_pvid;
rte_eth_dev_set_vlan_strip_on_queue;
@@ -132,3 +131,12 @@ DPDK_16.04 {
rte_eth_tx_buffer_set_err_callback;
} DPDK_2.2;
+
+DPDK_16.07 {
+ global:
+
+ rte_eth_add_first_rx_callback;
+ rte_eth_dev_get_name_by_port;
+ rte_eth_dev_get_port_by_name;
+ rte_eth_xstats_get_names;
+} DPDK_16.04;
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 7b7d1f85..e3cc3a7c 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,12 +59,10 @@
#include <rte_compat.h>
#include "rte_hash.h"
-#if defined(RTE_ARCH_X86)
-#include "rte_cmp_x86.h"
-#endif
+#include "rte_cuckoo_hash.h"
-#if defined(RTE_ARCH_ARM64)
-#include "rte_cmp_arm64.h"
+#if defined(RTE_ARCH_X86)
+#include "rte_cuckoo_hash_x86.h"
#endif
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
@@ -74,153 +72,6 @@ static struct rte_tailq_elem rte_hash_tailq = {
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
-/* Macro to enable/disable run-time checking of function parameters */
-#if defined(RTE_LIBRTE_HASH_DEBUG)
-#define RETURN_IF_TRUE(cond, retval) do { \
- if (cond) \
- return retval; \
-} while (0)
-#else
-#define RETURN_IF_TRUE(cond, retval)
-#endif
-
-/* Hash function used if none is specified */
-#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
-#include <rte_hash_crc.h>
-#define DEFAULT_HASH_FUNC rte_hash_crc
-#else
-#include <rte_jhash.h>
-#define DEFAULT_HASH_FUNC rte_jhash
-#endif
-
-/** Number of items per bucket. */
-#define RTE_HASH_BUCKET_ENTRIES 4
-
-#define NULL_SIGNATURE 0
-
-#define KEY_ALIGNMENT 16
-
-#define LCORE_CACHE_SIZE 8
-
-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
-/*
- * All different options to select a key compare function,
- * based on the key size and custom function.
- */
-enum cmp_jump_table_case {
- KEY_CUSTOM = 0,
- KEY_16_BYTES,
- KEY_32_BYTES,
- KEY_48_BYTES,
- KEY_64_BYTES,
- KEY_80_BYTES,
- KEY_96_BYTES,
- KEY_112_BYTES,
- KEY_128_BYTES,
- KEY_OTHER_BYTES,
- NUM_KEY_CMP_CASES,
-};
-
-/*
- * Table storing all different key compare functions
- * (multi-process supported)
- */
-const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
- NULL,
- rte_hash_k16_cmp_eq,
- rte_hash_k32_cmp_eq,
- rte_hash_k48_cmp_eq,
- rte_hash_k64_cmp_eq,
- rte_hash_k80_cmp_eq,
- rte_hash_k96_cmp_eq,
- rte_hash_k112_cmp_eq,
- rte_hash_k128_cmp_eq,
- memcmp
-};
-#else
-/*
- * All different options to select a key compare function,
- * based on the key size and custom function.
- */
-enum cmp_jump_table_case {
- KEY_CUSTOM = 0,
- KEY_OTHER_BYTES,
- NUM_KEY_CMP_CASES,
-};
-
-/*
- * Table storing all different key compare functions
- * (multi-process supported)
- */
-const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
- NULL,
- memcmp
-};
-
-#endif
-
-struct lcore_cache {
- unsigned len; /**< Cache len */
- void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
-} __rte_cache_aligned;
-
-/** A hash table structure. */
-struct rte_hash {
- char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */
- uint32_t entries; /**< Total table entries. */
- uint32_t num_buckets; /**< Number of buckets in table. */
- uint32_t key_len; /**< Length of hash key. */
- rte_hash_function hash_func; /**< Function used to calculate hash. */
- uint32_t hash_func_init_val; /**< Init value used by hash_func. */
- rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
- /**< Custom function used to compare keys. */
- enum cmp_jump_table_case cmp_jump_table_idx;
- /**< Indicates which compare function to use. */
- uint32_t bucket_bitmask; /**< Bitmask for getting bucket index
- from hash signature. */
- uint32_t key_entry_size; /**< Size of each key entry. */
-
- struct rte_ring *free_slots; /**< Ring that stores all indexes
- of the free slots in the key table */
- void *key_store; /**< Table storing all keys and data */
- struct rte_hash_bucket *buckets; /**< Table with buckets storing all the
- hash values and key indexes
- to the key table*/
- uint8_t hw_trans_mem_support; /**< Hardware transactional
- memory support */
- struct lcore_cache *local_free_slots;
- /**< Local cache per lcore, storing some indexes of the free slots */
-} __rte_cache_aligned;
-
-/* Structure storing both primary and secondary hashes */
-struct rte_hash_signatures {
- union {
- struct {
- hash_sig_t current;
- hash_sig_t alt;
- };
- uint64_t sig;
- };
-};
-
-/* Structure that stores key-value pair */
-struct rte_hash_key {
- union {
- uintptr_t idata;
- void *pdata;
- };
- /* Variable key size */
- char key[0];
-} __attribute__((aligned(KEY_ALIGNMENT)));
-
-/** Bucket structure */
-struct rte_hash_bucket {
- struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
- /* Includes dummy key index that always contains index 0 */
- uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
- uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
-} __rte_cache_aligned;
-
struct rte_hash *
rte_hash_find_existing(const char *name)
{
@@ -372,7 +223,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
/*
* If x86 architecture is used, select appropriate compare function,
- * which may use x86 instrinsics, otherwise use memcmp
+ * which may use x86 intrinsics, otherwise use memcmp
*/
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/* Select function to compare keys */
@@ -431,7 +282,23 @@ rte_hash_create(const struct rte_hash_parameters *params)
h->free_slots = r;
h->hw_trans_mem_support = hw_trans_mem_support;
- /* populate the free slots ring. Entry zero is reserved for key misses */
+	/* Turn on multi-writer only with an explicit flag from the user and TM
+ * support.
+ */
+ if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
+ if (h->hw_trans_mem_support) {
+ h->add_key = ADD_KEY_MULTIWRITER_TM;
+ } else {
+ h->add_key = ADD_KEY_MULTIWRITER;
+ h->multiwriter_lock = rte_malloc(NULL,
+ sizeof(rte_spinlock_t),
+ LCORE_CACHE_SIZE);
+ rte_spinlock_init(h->multiwriter_lock);
+ }
+ } else
+ h->add_key = ADD_KEY_SINGLEWRITER;
+
+ /* Populate free slots ring. Entry zero is reserved for key misses. */
for (i = 1; i < params->entries + 1; i++)
rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
@@ -482,6 +349,8 @@ rte_hash_free(struct rte_hash *h)
if (h->hw_trans_mem_support)
rte_free(h->local_free_slots);
+ if (h->add_key == ADD_KEY_MULTIWRITER)
+ rte_free(h->multiwriter_lock);
rte_ring_free(h->free_slots);
rte_free(h->key_store);
rte_free(h->buckets);
@@ -632,6 +501,9 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
unsigned lcore_id;
struct lcore_cache *cached_free_slots = NULL;
+ if (h->add_key == ADD_KEY_MULTIWRITER)
+ rte_spinlock_lock(h->multiwriter_lock);
+
prim_bucket_idx = sig & h->bucket_bitmask;
prim_bkt = &h->buckets[prim_bucket_idx];
rte_prefetch0(prim_bkt);
@@ -712,35 +584,67 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
rte_memcpy(new_k->key, key, h->key_len);
new_k->pdata = data;
- /* Insert new entry is there is room in the primary bucket */
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- /* Check if slot is available */
- if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
- prim_bkt->signatures[i].current = sig;
- prim_bkt->signatures[i].alt = alt_hash;
- prim_bkt->key_idx[i] = new_idx;
+#if defined(RTE_ARCH_X86) /* currently only x86 support HTM */
+ if (h->add_key == ADD_KEY_MULTIWRITER_TM) {
+ ret = rte_hash_cuckoo_insert_mw_tm(prim_bkt,
+ sig, alt_hash, new_idx);
+ if (ret >= 0)
+ return new_idx - 1;
+
+ /* Primary bucket full, need to make space for new entry */
+ ret = rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig,
+ alt_hash, new_idx);
+
+ if (ret >= 0)
+ return new_idx - 1;
+
+ /* Also search secondary bucket to get better occupancy */
+ ret = rte_hash_cuckoo_make_space_mw_tm(h, sec_bkt, sig,
+ alt_hash, new_idx);
+
+ if (ret >= 0)
+ return new_idx - 1;
+ } else {
+#endif
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ /* Check if slot is available */
+ if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
+ prim_bkt->signatures[i].current = sig;
+ prim_bkt->signatures[i].alt = alt_hash;
+ prim_bkt->key_idx[i] = new_idx;
+ break;
+ }
+ }
+
+ if (i != RTE_HASH_BUCKET_ENTRIES) {
+ if (h->add_key == ADD_KEY_MULTIWRITER)
+ rte_spinlock_unlock(h->multiwriter_lock);
return new_idx - 1;
}
- }
- /* Primary bucket is full, so we need to make space for new entry */
- ret = make_space_bucket(h, prim_bkt);
- /*
- * After recursive function.
- * Insert the new entry in the position of the pushed entry
- * if successful or return error and
- * store the new slot back in the ring
- */
- if (ret >= 0) {
- prim_bkt->signatures[ret].current = sig;
- prim_bkt->signatures[ret].alt = alt_hash;
- prim_bkt->key_idx[ret] = new_idx;
- return new_idx - 1;
+		/* Primary bucket is full, so make space for the new entry.
+		 * After the recursive call, insert the new entry in the
+		 * position of the pushed entry if successful; otherwise
+		 * return an error and store the new slot back in the ring.
+		 */
+ ret = make_space_bucket(h, prim_bkt);
+ if (ret >= 0) {
+ prim_bkt->signatures[ret].current = sig;
+ prim_bkt->signatures[ret].alt = alt_hash;
+ prim_bkt->key_idx[ret] = new_idx;
+ if (h->add_key == ADD_KEY_MULTIWRITER)
+ rte_spinlock_unlock(h->multiwriter_lock);
+ return new_idx - 1;
+ }
+#if defined(RTE_ARCH_X86)
}
-
+#endif
/* Error in addition, store new slot back in the ring and return error */
enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));
+ if (h->add_key == ADD_KEY_MULTIWRITER)
+ rte_spinlock_unlock(h->multiwriter_lock);
return ret;
}
diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
new file mode 100644
index 00000000..6c76700f
--- /dev/null
+++ b/lib/librte_hash/rte_cuckoo_hash.h
@@ -0,0 +1,219 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* rte_cuckoo_hash.h
+ * This file holds Cuckoo Hash private data structures so they can be
+ * included from platform-specific files like rte_cuckoo_hash_x86.h
+ */
+
+#ifndef _RTE_CUCKOO_HASH_H_
+#define _RTE_CUCKOO_HASH_H_
+
+#if defined(RTE_ARCH_X86)
+#include "rte_cmp_x86.h"
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+#include "rte_cmp_arm64.h"
+#endif
+
+/* Macro to enable/disable run-time checking of function parameters */
+#if defined(RTE_LIBRTE_HASH_DEBUG)
+#define RETURN_IF_TRUE(cond, retval) do { \
+ if (cond) \
+ return retval; \
+} while (0)
+#else
+#define RETURN_IF_TRUE(cond, retval)
+#endif
+
+/* Hash function used if none is specified */
+#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
+#include <rte_hash_crc.h>
+#define DEFAULT_HASH_FUNC rte_hash_crc
+#else
+#include <rte_jhash.h>
+#define DEFAULT_HASH_FUNC rte_jhash
+#endif
+
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
+/*
+ * All different options to select a key compare function,
+ * based on the key size and custom function.
+ */
+enum cmp_jump_table_case {
+ KEY_CUSTOM = 0,
+ KEY_16_BYTES,
+ KEY_32_BYTES,
+ KEY_48_BYTES,
+ KEY_64_BYTES,
+ KEY_80_BYTES,
+ KEY_96_BYTES,
+ KEY_112_BYTES,
+ KEY_128_BYTES,
+ KEY_OTHER_BYTES,
+ NUM_KEY_CMP_CASES,
+};
+
+/*
+ * Table storing all different key compare functions
+ * (multi-process supported)
+ */
+const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
+ NULL,
+ rte_hash_k16_cmp_eq,
+ rte_hash_k32_cmp_eq,
+ rte_hash_k48_cmp_eq,
+ rte_hash_k64_cmp_eq,
+ rte_hash_k80_cmp_eq,
+ rte_hash_k96_cmp_eq,
+ rte_hash_k112_cmp_eq,
+ rte_hash_k128_cmp_eq,
+ memcmp
+};
+#else
+/*
+ * All different options to select a key compare function,
+ * based on the key size and custom function.
+ */
+enum cmp_jump_table_case {
+ KEY_CUSTOM = 0,
+ KEY_OTHER_BYTES,
+ NUM_KEY_CMP_CASES,
+};
+
+/*
+ * Table storing all different key compare functions
+ * (multi-process supported)
+ */
+const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
+ NULL,
+ memcmp
+};
+
+#endif
+
+enum add_key_case {
+ ADD_KEY_SINGLEWRITER = 0,
+ ADD_KEY_MULTIWRITER,
+ ADD_KEY_MULTIWRITER_TM,
+};
+
+/** Number of items per bucket. */
+#define RTE_HASH_BUCKET_ENTRIES 4
+
+#define NULL_SIGNATURE 0
+
+#define KEY_ALIGNMENT 16
+
+#define LCORE_CACHE_SIZE 64
+
+#define RTE_HASH_BFS_QUEUE_MAX_LEN 1000
+
+#define RTE_XABORT_CUCKOO_PATH_INVALIDED 0x4
+
+#define RTE_HASH_TSX_MAX_RETRY 10
+
+struct lcore_cache {
+ unsigned len; /**< Cache len */
+ void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
+} __rte_cache_aligned;
+
+/* Structure storing both primary and secondary hashes */
+struct rte_hash_signatures {
+ union {
+ struct {
+ hash_sig_t current;
+ hash_sig_t alt;
+ };
+ uint64_t sig;
+ };
+};
+
+/* Structure that stores key-value pair */
+struct rte_hash_key {
+ union {
+ uintptr_t idata;
+ void *pdata;
+ };
+ /* Variable key size */
+ char key[0];
+} __attribute__((aligned(KEY_ALIGNMENT)));
+
+/** Bucket structure */
+struct rte_hash_bucket {
+ struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
+ /* Includes dummy key index that always contains index 0 */
+ uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
+ uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
+} __rte_cache_aligned;
+
+/** A hash table structure. */
+struct rte_hash {
+ char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */
+ uint32_t entries; /**< Total table entries. */
+ uint32_t num_buckets; /**< Number of buckets in table. */
+ uint32_t key_len; /**< Length of hash key. */
+ rte_hash_function hash_func; /**< Function used to calculate hash. */
+ uint32_t hash_func_init_val; /**< Init value used by hash_func. */
+ rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
+ /**< Custom function used to compare keys. */
+ enum cmp_jump_table_case cmp_jump_table_idx;
+ /**< Indicates which compare function to use. */
+ uint32_t bucket_bitmask; /**< Bitmask for getting bucket index
+ from hash signature. */
+ uint32_t key_entry_size; /**< Size of each key entry. */
+
+ struct rte_ring *free_slots; /**< Ring that stores all indexes
+ of the free slots in the key table */
+ void *key_store; /**< Table storing all keys and data */
+ struct rte_hash_bucket *buckets; /**< Table with buckets storing all the
+ hash values and key indexes
+ to the key table*/
+ uint8_t hw_trans_mem_support; /**< Hardware transactional
+ memory support */
+ struct lcore_cache *local_free_slots;
+ /**< Local cache per lcore, storing some indexes of the free slots */
+ enum add_key_case add_key; /**< Multi-writer hash add behavior */
+
+ rte_spinlock_t *multiwriter_lock; /**< Multi-writer spinlock for w/o TM */
+} __rte_cache_aligned;
+
+struct queue_node {
+ struct rte_hash_bucket *bkt; /* Current bucket on the bfs search */
+
+ struct queue_node *prev; /* Parent(bucket) in search path */
+ int prev_slot; /* Parent(slot) in search path */
+};
+
+#endif
diff --git a/lib/librte_hash/rte_cuckoo_hash_x86.h b/lib/librte_hash/rte_cuckoo_hash_x86.h
new file mode 100644
index 00000000..fa5630b7
--- /dev/null
+++ b/lib/librte_hash/rte_cuckoo_hash_x86.h
@@ -0,0 +1,193 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* rte_cuckoo_hash_x86.h
+ * This file holds all x86-specific Cuckoo Hash functions
+ */
+
+/* Only tries to insert at one bucket (@prim_bkt) without trying to push
+ * buckets around
+ */
+static inline unsigned
+rte_hash_cuckoo_insert_mw_tm(struct rte_hash_bucket *prim_bkt,
+ hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
+{
+ unsigned i, status;
+ unsigned try = 0;
+
+ while (try < RTE_HASH_TSX_MAX_RETRY) {
+ status = rte_xbegin();
+ if (likely(status == RTE_XBEGIN_STARTED)) {
+ /* Insert new entry if there is room in the primary
+ * bucket.
+ */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ /* Check if slot is available */
+ if (likely(prim_bkt->signatures[i].sig ==
+ NULL_SIGNATURE)) {
+ prim_bkt->signatures[i].current = sig;
+ prim_bkt->signatures[i].alt = alt_hash;
+ prim_bkt->key_idx[i] = new_idx;
+ break;
+ }
+ }
+ rte_xend();
+
+ if (i != RTE_HASH_BUCKET_ENTRIES)
+ return 0;
+
+ break; /* break off try loop if transaction commits */
+ } else {
+ /* If we abort we give up this cuckoo path. */
+ try++;
+ rte_pause();
+ }
+ }
+
+ return -1;
+}
+
+/* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill
+ * the path head with new entry (sig, alt_hash, new_idx)
+ */
+static inline int
+rte_hash_cuckoo_move_insert_mw_tm(const struct rte_hash *h,
+ struct queue_node *leaf, uint32_t leaf_slot,
+ hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
+{
+ unsigned try = 0;
+ unsigned status;
+ uint32_t prev_alt_bkt_idx;
+
+ struct queue_node *prev_node, *curr_node = leaf;
+ struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
+ uint32_t prev_slot, curr_slot = leaf_slot;
+
+ while (try < RTE_HASH_TSX_MAX_RETRY) {
+ status = rte_xbegin();
+ if (likely(status == RTE_XBEGIN_STARTED)) {
+ while (likely(curr_node->prev != NULL)) {
+ prev_node = curr_node->prev;
+ prev_bkt = prev_node->bkt;
+ prev_slot = curr_node->prev_slot;
+
+ prev_alt_bkt_idx
+ = prev_bkt->signatures[prev_slot].alt
+ & h->bucket_bitmask;
+
+ if (unlikely(&h->buckets[prev_alt_bkt_idx]
+ != curr_bkt)) {
+ rte_xabort(RTE_XABORT_CUCKOO_PATH_INVALIDED);
+ }
+
+ /* Need to swap current/alt sig to allow later
+ * Cuckoo insert to move elements back to its
+ * primary bucket if available
+ */
+ curr_bkt->signatures[curr_slot].alt =
+ prev_bkt->signatures[prev_slot].current;
+ curr_bkt->signatures[curr_slot].current =
+ prev_bkt->signatures[prev_slot].alt;
+ curr_bkt->key_idx[curr_slot]
+ = prev_bkt->key_idx[prev_slot];
+
+ curr_slot = prev_slot;
+ curr_node = prev_node;
+ curr_bkt = curr_node->bkt;
+ }
+
+ curr_bkt->signatures[curr_slot].current = sig;
+ curr_bkt->signatures[curr_slot].alt = alt_hash;
+ curr_bkt->key_idx[curr_slot] = new_idx;
+
+ rte_xend();
+
+ return 0;
+ }
+
+		/* If we abort, we give up this cuckoo path, since most
+		 * likely it is no longer valid as TSX detected a data
+		 * conflict.
+		 */
+ try++;
+ rte_pause();
+ }
+
+ return -1;
+}
+
+/*
+ * Make space for a new key, using a bfs cuckoo search and multi-writer
+ * safe cuckoo moves
+ */
+static inline int
+rte_hash_cuckoo_make_space_mw_tm(const struct rte_hash *h,
+ struct rte_hash_bucket *bkt,
+ hash_sig_t sig, hash_sig_t alt_hash,
+ uint32_t new_idx)
+{
+ unsigned i;
+ struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
+ struct queue_node *tail, *head;
+ struct rte_hash_bucket *curr_bkt, *alt_bkt;
+
+ tail = queue;
+ head = queue + 1;
+ tail->bkt = bkt;
+ tail->prev = NULL;
+ tail->prev_slot = -1;
+
+ /* Cuckoo bfs Search */
+ while (likely(tail != head && head <
+ queue + RTE_HASH_BFS_QUEUE_MAX_LEN - 4)) {
+ curr_bkt = tail->bkt;
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (curr_bkt->signatures[i].sig == NULL_SIGNATURE) {
+ if (likely(rte_hash_cuckoo_move_insert_mw_tm(h,
+ tail, i, sig,
+ alt_hash, new_idx) == 0))
+ return 0;
+ }
+
+ /* Enqueue new node and keep prev node info */
+ alt_bkt = &(h->buckets[curr_bkt->signatures[i].alt
+ & h->bucket_bitmask]);
+ head->bkt = alt_bkt;
+ head->prev = tail;
+ head->prev_slot = i;
+ head++;
+ }
+ tail++;
+ }
+
+ return -ENOSPC;
+}
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index ae00b658..c9612fbd 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -60,6 +60,9 @@ extern "C" {
/** Enable Hardware transactional memory support. */
#define RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT 0x01
+/** Default behavior of insertion, single writer/multi writer */
+#define RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD 0x02
+
/** Signature of key that is stored internally. */
typedef uint32_t hash_sig_t;
@@ -362,8 +365,6 @@ rte_hash_lookup_with_hash(const struct rte_hash *h,
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key);
-#define rte_hash_lookup_multi rte_hash_lookup_bulk
-#define rte_hash_lookup_multi_data rte_hash_lookup_bulk_data
/**
* Find multiple keys in the hash table.
* This operation is multi-thread safe.
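
A sketch of creating a table that accepts concurrent adds via the new flag; the table parameters are illustrative:

#include <rte_hash.h>
#include <rte_jhash.h>

static struct rte_hash *
create_mw_hash(void)
{
	struct rte_hash_parameters params = {
		.name = "mw_hash",
		.entries = 1024,
		.key_len = 16,
		.hash_func = rte_jhash,
		.socket_id = 0,	/* or rte_socket_id() */
		.extra_flag = RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD,
	};

	/* adds may then run concurrently from several lcores; TM is used
	 * when the CPU supports it, a spinlock otherwise */
	return rte_hash_create(&params);
}
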
diff --git a/lib/librte_ip_frag/Makefile b/lib/librte_ip_frag/Makefile
index 9d06780d..e97dfbd3 100644
--- a/lib/librte_ip_frag/Makefile
+++ b/lib/librte_ip_frag/Makefile
@@ -52,8 +52,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += ip_frag_internal.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IP_FRAG)-include += rte_ip_frag.h
-
-# this library depends on rte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_mempool lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_mempool
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ip_frag/ip_frag_common.h b/lib/librte_ip_frag/ip_frag_common.h
index cde6ed4a..835e4f93 100644
--- a/lib/librte_ip_frag/ip_frag_common.h
+++ b/lib/librte_ip_frag/ip_frag_common.h
@@ -38,17 +38,9 @@
/* logging macros. */
#ifdef RTE_LIBRTE_IP_FRAG_DEBUG
-
#define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
-
-#define IP_FRAG_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n", \
- __func__, __LINE__); \
-}
#else
#define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#define IP_FRAG_ASSERT(exp) do {} while (0)
#endif /* IP_FRAG_DEBUG */
#define IPV4_KEYLEN 1
@@ -76,8 +68,8 @@ struct ip_frag_pkt * ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
struct ip_frag_pkt **free, struct ip_frag_pkt **stale);
/* these functions need to be declared here as ip_frag_process relies on them */
-struct rte_mbuf * ipv4_frag_reassemble(const struct ip_frag_pkt *fp);
-struct rte_mbuf * ipv6_frag_reassemble(const struct ip_frag_pkt *fp);
+struct rte_mbuf *ipv4_frag_reassemble(struct ip_frag_pkt *fp);
+struct rte_mbuf *ipv6_frag_reassemble(struct ip_frag_pkt *fp);
diff --git a/lib/librte_ip_frag/rte_ipv4_fragmentation.c b/lib/librte_ip_frag/rte_ipv4_fragmentation.c
index a4ed9238..a2259e80 100644
--- a/lib/librte_ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/librte_ip_frag/rte_ipv4_fragmentation.c
@@ -107,7 +107,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
	/* Fragment size should be a multiple of 8. */
- IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+ RTE_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
in_hdr = rte_pktmbuf_mtod(pkt_in, struct ipv4_hdr *);
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
diff --git a/lib/librte_ip_frag/rte_ipv4_reassembly.c b/lib/librte_ip_frag/rte_ipv4_reassembly.c
index 26d07f9a..e084ca59 100644
--- a/lib/librte_ip_frag/rte_ipv4_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv4_reassembly.c
@@ -41,11 +41,12 @@
* Reassemble fragments into one packet.
*/
struct rte_mbuf *
-ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
+ipv4_frag_reassemble(struct ip_frag_pkt *fp)
{
struct ipv4_hdr *ip_hdr;
struct rte_mbuf *m, *prev;
uint32_t i, n, ofs, first_len;
+ uint32_t curr_idx = 0;
first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
n = fp->last_idx - 1;
@@ -53,6 +54,7 @@ ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
/*start from the last fragment. */
m = fp->frags[IP_LAST_FRAG_IDX].mb;
ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
+ curr_idx = IP_LAST_FRAG_IDX;
while (ofs != first_len) {
@@ -67,6 +69,10 @@ ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len));
rte_pktmbuf_chain(fp->frags[i].mb, m);
+ /* this mbuf should not be accessed directly */
+ fp->frags[curr_idx].mb = NULL;
+ curr_idx = i;
+
/* update our last fragment and offset. */
m = fp->frags[i].mb;
ofs = fp->frags[i].ofs;
diff --git a/lib/librte_ip_frag/rte_ipv6_fragmentation.c b/lib/librte_ip_frag/rte_ipv6_fragmentation.c
index 1e30004f..db666bbf 100644
--- a/lib/librte_ip_frag/rte_ipv6_fragmentation.c
+++ b/lib/librte_ip_frag/rte_ipv6_fragmentation.c
@@ -110,7 +110,7 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
/* Fragment size should be a multiple of 8. */
- IP_FRAG_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
+ RTE_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
diff --git a/lib/librte_ip_frag/rte_ipv6_reassembly.c b/lib/librte_ip_frag/rte_ipv6_reassembly.c
index d29cb1da..21a5ef5d 100644
--- a/lib/librte_ip_frag/rte_ipv6_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv6_reassembly.c
@@ -59,13 +59,14 @@ ip_frag_memmove(char *dst, char *src, int len)
* Reassemble fragments into one packet.
*/
struct rte_mbuf *
-ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
+ipv6_frag_reassemble(struct ip_frag_pkt *fp)
{
struct ipv6_hdr *ip_hdr;
struct ipv6_extension_fragment *frag_hdr;
struct rte_mbuf *m, *prev;
uint32_t i, n, ofs, first_len;
uint32_t last_len, move_len, payload_len;
+ uint32_t curr_idx = 0;
first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
n = fp->last_idx - 1;
@@ -74,6 +75,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
m = fp->frags[IP_LAST_FRAG_IDX].mb;
ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
last_len = fp->frags[IP_LAST_FRAG_IDX].len;
+ curr_idx = IP_LAST_FRAG_IDX;
payload_len = ofs + last_len;
@@ -90,6 +92,10 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len));
rte_pktmbuf_chain(fp->frags[i].mb, m);
+ /* this mbuf should not be accessed directly */
+ fp->frags[curr_idx].mb = NULL;
+ curr_idx = i;
+
/* update our last fragment and offset. */
m = fp->frags[i].mb;
ofs = fp->frags[i].ofs;
diff --git a/lib/librte_ivshmem/Makefile b/lib/librte_ivshmem/Makefile
index 16defdba..c099438c 100644
--- a/lib/librte_ivshmem/Makefile
+++ b/lib/librte_ivshmem/Makefile
@@ -46,7 +46,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_IVSHMEM) := rte_ivshmem.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_IVSHMEM)-include := rte_ivshmem.h
-# this lib needs eal
+# this lib needs EAL, ring and mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += lib/librte_ring
DEPDIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += lib/librte_mempool
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ivshmem/rte_ivshmem.c b/lib/librte_ivshmem/rte_ivshmem.c
index c8b332ce..c26edb61 100644
--- a/lib/librte_ivshmem/rte_ivshmem.c
+++ b/lib/librte_ivshmem/rte_ivshmem.c
@@ -548,26 +548,40 @@ add_ring_to_metadata(const struct rte_ring * r,
}
static int
-add_mempool_to_metadata(const struct rte_mempool * mp,
- struct ivshmem_config * config)
+add_mempool_memzone_to_metadata(const void *addr,
+ struct ivshmem_config *config)
{
- struct rte_memzone * mz;
- int ret;
+ struct rte_memzone *mz;
- mz = get_memzone_by_addr(mp);
- ret = 0;
+ mz = get_memzone_by_addr(addr);
if (!mz) {
RTE_LOG(ERR, EAL, "Cannot find memzone for mempool!\n");
return -1;
}
- /* mempool consists of memzone and ring */
- ret = add_memzone_to_metadata(mz, config);
+ return add_memzone_to_metadata(mz, config);
+}
+
+static int
+add_mempool_to_metadata(const struct rte_mempool *mp,
+ struct ivshmem_config *config)
+{
+ struct rte_mempool_memhdr *memhdr;
+ int ret;
+
+ ret = add_mempool_memzone_to_metadata(mp, config);
if (ret < 0)
return -1;
- return add_ring_to_metadata(mp->ring, config);
+ STAILQ_FOREACH(memhdr, &mp->mem_list, next) {
+ ret = add_mempool_memzone_to_metadata(memhdr->addr, config);
+ if (ret < 0)
+ return -1;
+ }
+
+ /* mempool consists of memzone and ring */
+ return add_ring_to_metadata(mp->pool_data, config);
}
int
diff --git a/lib/librte_jobstats/rte_jobstats.h b/lib/librte_jobstats/rte_jobstats.h
index c2b285f8..b3686030 100644
--- a/lib/librte_jobstats/rte_jobstats.h
+++ b/lib/librte_jobstats/rte_jobstats.h
@@ -85,7 +85,7 @@ struct rte_jobstats {
/**< Minimum execute time. */
uint64_t max_exec_time;
- /**< Minimum execute time. */
+ /**< Maximum execute time. */
uint64_t exec_cnt;
/**< Execute count. */
diff --git a/lib/librte_kni/Makefile b/lib/librte_kni/Makefile
index 1398164d..09474461 100644
--- a/lib/librte_kni/Makefile
+++ b/lib/librte_kni/Makefile
@@ -46,8 +46,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_KNI) := rte_kni.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_KNI)-include := rte_kni.h
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_eal lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_ether
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index ea9baf4c..3028fd43 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -323,6 +323,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
char intf_name[RTE_KNI_NAMESIZE];
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
+ const struct rte_mempool *mp;
struct rte_kni_memzone_slot *slot = NULL;
if (!pktmbuf_pool || !conf || !conf->name[0])
@@ -415,12 +416,17 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
/* MBUF mempool */
- snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME,
+ snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT,
pktmbuf_pool->name);
mz = rte_memzone_lookup(mz_name);
KNI_MEM_CHECK(mz == NULL);
- dev_info.mbuf_va = mz->addr;
- dev_info.mbuf_phys = mz->phys_addr;
+ mp = (struct rte_mempool *)mz->addr;
+	/* KNI currently requires the mempool to have only one memory chunk */
+ if (mp->nb_mem_chunks != 1)
+ goto kni_fail;
+
+ dev_info.mbuf_va = STAILQ_FIRST(&mp->mem_list)->addr;
+ dev_info.mbuf_phys = STAILQ_FIRST(&mp->mem_list)->phys_addr;
ctx->pktmbuf_pool = pktmbuf_pool;
ctx->group_id = conf->group_id;
ctx->slot_id = slot->id;
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index 9899a170..7363e6cf 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -113,6 +113,9 @@ void rte_kni_init(unsigned int max_kni_ifaces);
* The rte_kni_alloc shall not be called before rte_kni_init() has been
* called. rte_kni_alloc is thread safe.
*
+ * The mempool should have a capacity of more than "2 x KNI_FIFO_COUNT_MAX"
+ * elements for each KNI interface allocated.
+ *
* @param pktmbuf_pool
* The mempool for allocating mbufs for packets.
* @param conf
@@ -160,8 +163,8 @@ int rte_kni_handle_request(struct rte_kni *kni);
/**
* Retrieve a burst of packets from a KNI interface. The retrieved packets are
* stored in rte_mbuf structures whose pointers are supplied in the array of
- * mbufs, and the maximum number is indicated by num. It handles the freeing of
- * the mbufs in the free queue of KNI interface.
+ * mbufs, and the maximum number is indicated by num. It handles allocating
+ * the mbufs for KNI interface alloc queue.
*
* @param kni
* The KNI interface context.
@@ -179,8 +182,8 @@ unsigned rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,
/**
* Send a burst of packets to a KNI interface. The packets to be sent out are
* stored in rte_mbuf structures whose pointers are supplied in the array of
- * mbufs, and the maximum number is indicated by num. It handles allocating
- * the mbufs for KNI interface alloc queue.
+ * mbufs, and the maximum number is indicated by num. It handles the freeing of
+ * the mbufs in the free queue of the KNI interface.
*
* @param kni
* The KNI interface context.
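With the corrected pairing above, rte_kni_rx_burst() replenishes the alloc queue while rte_kni_tx_burst() drains the free queue. A minimal forwarding sketch of the two calls; the kni handle, the burst size, and the drop policy are illustrative, not part of this patch:

	struct rte_mbuf *pkts[32];
	unsigned nb_rx, nb_tx, i;

	/* packets the kernel wrote to the KNI interface */
	nb_rx = rte_kni_rx_burst(kni, pkts, 32);

	/* send them back out; this also recycles mbufs queued
	 * on the KNI free queue */
	nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx);

	/* drop whatever the KNI FIFO could not accept */
	for (i = nb_tx; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);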
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 8bdf6065..6f65d1c2 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -373,7 +373,6 @@ rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- rte_free(lpm->rules_tbl);
rte_free(lpm);
rte_free(te);
}
diff --git a/lib/librte_lpm/rte_lpm6.c b/lib/librte_lpm/rte_lpm6.c
index ba4353ce..32fdba01 100644
--- a/lib/librte_lpm/rte_lpm6.c
+++ b/lib/librte_lpm/rte_lpm6.c
@@ -601,7 +601,7 @@ int
rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
{
const struct rte_lpm6_tbl_entry *tbl;
- const struct rte_lpm6_tbl_entry *tbl_next;
+ const struct rte_lpm6_tbl_entry *tbl_next = NULL;
int status;
uint8_t first_byte;
uint32_t tbl24_index;
@@ -636,7 +636,7 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
{
unsigned i;
const struct rte_lpm6_tbl_entry *tbl;
- const struct rte_lpm6_tbl_entry *tbl_next;
+ const struct rte_lpm6_tbl_entry *tbl_next = NULL;
uint32_t tbl24_index;
uint8_t first_byte, next_hop;
int status;
diff --git a/lib/librte_lpm/rte_lpm6.h b/lib/librte_lpm/rte_lpm6.h
index cedcea8d..13d027f9 100644
--- a/lib/librte_lpm/rte_lpm6.h
+++ b/lib/librte_lpm/rte_lpm6.h
@@ -38,6 +38,8 @@
* RTE Longest Prefix Match for IPv6 (LPM6)
*/
+#include <stdint.h>
+
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index dc0467c9..601e5282 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -86,7 +86,7 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
struct rte_pktmbuf_pool_private default_mbp_priv;
uint16_t roomsz;
- RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+ RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
/* if no structure is provided, assume no mbuf private area */
user_mbp_priv = opaque_arg;
@@ -100,7 +100,7 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
user_mbp_priv = &default_mbp_priv;
}
- RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
+ RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf) +
user_mbp_priv->mbuf_data_room_size +
user_mbp_priv->mbuf_priv_size);
@@ -126,9 +126,9 @@ rte_pktmbuf_init(struct rte_mempool *mp,
mbuf_size = sizeof(struct rte_mbuf) + priv_size;
buf_len = rte_pktmbuf_data_room_size(mp);
- RTE_MBUF_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
- RTE_MBUF_ASSERT(mp->elt_size >= mbuf_size);
- RTE_MBUF_ASSERT(buf_len <= UINT16_MAX);
+ RTE_ASSERT(RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) == priv_size);
+ RTE_ASSERT(mp->elt_size >= mbuf_size);
+ RTE_ASSERT(buf_len <= UINT16_MAX);
memset(m, 0, mp->elt_size);
@@ -153,6 +153,7 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,
unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
int socket_id)
{
+ struct rte_mempool *mp;
struct rte_pktmbuf_pool_private mbp_priv;
unsigned elt_size;
@@ -167,10 +168,27 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,
mbp_priv.mbuf_data_room_size = data_room_size;
mbp_priv.mbuf_priv_size = priv_size;
- return rte_mempool_create(name, n, elt_size,
- cache_size, sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, &mbp_priv, rte_pktmbuf_init, NULL,
- socket_id, 0);
+ mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+ sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
+ if (mp == NULL)
+ return NULL;
+
+ rte_errno = rte_mempool_set_ops_byname(mp,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+ if (rte_errno != 0) {
+ RTE_LOG(ERR, MBUF, "error setting mempool handler\n");
+ return NULL;
+ }
+ rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+ if (rte_mempool_populate_default(mp) < 0) {
+ rte_mempool_free(mp);
+ return NULL;
+ }
+
+ rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
+
+ return mp;
}
/* do some sanity checks on a mbuf: panic if it fails */
@@ -218,7 +236,7 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
__rte_mbuf_sanity_check(m, 1);
- fprintf(f, "dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n",
+ fprintf(f, "dump mbuf at %p, phys=%"PRIx64", buf_len=%u\n",
m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
fprintf(f, " pkt_len=%"PRIu32", ol_flags=%"PRIx64", nb_segs=%u, "
"in_port=%u\n", m->pkt_len, m->ol_flags,
@@ -228,7 +246,7 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
while (m && nb_segs != 0) {
__rte_mbuf_sanity_check(m, 0);
- fprintf(f, " segment at 0x%p, data=0x%p, data_len=%u\n",
+ fprintf(f, " segment at %p, data=%p, data_len=%u\n",
m, rte_pktmbuf_mtod(m, void *), (unsigned)m->data_len);
len = dump_len;
if (len > m->data_len)
@@ -254,12 +272,10 @@ const char *rte_get_rx_ol_flag_name(uint64_t mask)
case PKT_RX_L4_CKSUM_BAD: return "PKT_RX_L4_CKSUM_BAD";
case PKT_RX_IP_CKSUM_BAD: return "PKT_RX_IP_CKSUM_BAD";
case PKT_RX_EIP_CKSUM_BAD: return "PKT_RX_EIP_CKSUM_BAD";
- /* case PKT_RX_OVERSIZE: return "PKT_RX_OVERSIZE"; */
- /* case PKT_RX_HBUF_OVERFLOW: return "PKT_RX_HBUF_OVERFLOW"; */
- /* case PKT_RX_RECIP_ERR: return "PKT_RX_RECIP_ERR"; */
- /* case PKT_RX_MAC_ERR: return "PKT_RX_MAC_ERR"; */
+ case PKT_RX_VLAN_STRIPPED: return "PKT_RX_VLAN_STRIPPED";
case PKT_RX_IEEE1588_PTP: return "PKT_RX_IEEE1588_PTP";
case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
+ case PKT_RX_QINQ_STRIPPED: return "PKT_RX_QINQ_STRIPPED";
default: return NULL;
}
}
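The rewritten rte_pktmbuf_pool_create() above is itself the template for composing the new mempool API: create an empty pool, bind an ops backend, run the pool initializer, populate, then iterate the per-object initializer. A hedged sketch of the same sequence with a non-default backend; the "stack" ops name is taken from the new mempool Makefile targets, and the sizes are illustrative:

	struct rte_mempool *mp;
	struct rte_pktmbuf_pool_private priv = {
		.mbuf_data_room_size = RTE_MBUF_DEFAULT_BUF_SIZE,
		.mbuf_priv_size = 0,
	};

	mp = rte_mempool_create_empty("pkt_pool", 8192,
		sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE,
		256, sizeof(priv), rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;
	if (rte_mempool_set_ops_byname(mp, "stack", NULL) != 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	rte_pktmbuf_pool_init(mp, &priv);
	if (rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);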
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 75a227d8..101485fb 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -65,10 +65,6 @@
extern "C" {
#endif
-/* deprecated options */
-#pragma GCC poison RTE_MBUF_SCATTER_GATHER
-#pragma GCC poison RTE_MBUF_REFCNT
-
/*
* Packet Offload Features Flags. It also carry packet type information.
* Critical resources. Both rx/tx shared these bits. Be cautious on any change
@@ -83,21 +79,51 @@ extern "C" {
* Keep these flags synchronized with rte_get_rx_ol_flag_name() and
* rte_get_tx_ol_flag_name().
*/
-#define PKT_RX_VLAN_PKT (1ULL << 0) /**< RX packet is a 802.1q VLAN packet. */
+
+/**
+ * RX packet is a 802.1q VLAN packet. This flag was set by PMDs when
+ * the packet is recognized as a VLAN, but the behavior between PMDs
+ * was not the same. This flag is kept for some time to avoid breaking
+ * applications and should be replaced by PKT_RX_VLAN_STRIPPED.
+ */
+#define PKT_RX_VLAN_PKT (1ULL << 0)
+
#define PKT_RX_RSS_HASH (1ULL << 1) /**< RX packet with RSS hash result. */
#define PKT_RX_FDIR (1ULL << 2) /**< RX packet with FDIR match indicate. */
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3) /**< L4 cksum of RX pkt. is not OK. */
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4) /**< IP cksum of RX pkt. is not OK. */
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5) /**< External IP header checksum error. */
-#define PKT_RX_OVERSIZE (0ULL << 0) /**< Num of desc of an RX pkt oversize. */
-#define PKT_RX_HBUF_OVERFLOW (0ULL << 0) /**< Header buffer overflow. */
-#define PKT_RX_RECIP_ERR (0ULL << 0) /**< Hardware processing error. */
-#define PKT_RX_MAC_ERR (0ULL << 0) /**< MAC error. */
+
+/**
+ * A vlan has been stripped by the hardware and its tci is saved in
+ * mbuf->vlan_tci. This can only happen if vlan stripping is enabled
+ * in the RX configuration of the PMD.
+ */
+#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
+
+/* hole, some bits can be reused here */
+
#define PKT_RX_IEEE1588_PTP (1ULL << 9) /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet.*/
#define PKT_RX_FDIR_ID (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
-#define PKT_RX_QINQ_PKT (1ULL << 15) /**< RX packet with double VLAN stripped. */
+
+/**
+ * The 2 vlans have been stripped by the hardware and their tci are
+ * saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
+ * This can only happen if vlan stripping is enabled in the RX
+ * configuration of the PMD. If this flag is set, PKT_RX_VLAN_STRIPPED
+ * must also be set.
+ */
+#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
+
+/**
+ * Deprecated.
+ * RX packet with double VLAN stripped.
+ * This flag is replaced by PKT_RX_QINQ_STRIPPED.
+ */
+#define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
+
/* add new RX flags here */
/* add new TX flags here */
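The renamed flags let an application distinguish a stripped tag from one still present in the packet data. A short RX-side sketch, assuming m is a received mbuf; handle_vlan() is a hypothetical helper:

	if (m->ol_flags & PKT_RX_VLAN_STRIPPED) {
		/* the tag is gone from the data; the PMD saved its TCI */
		handle_vlan(m->vlan_tci);               /* hypothetical */
		if (m->ol_flags & PKT_RX_QINQ_STRIPPED)
			handle_vlan(m->vlan_tci_outer); /* outer tag */
	}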
@@ -278,6 +304,13 @@ extern "C" {
*/
#define RTE_PTYPE_L2_ETHER_LLDP 0x00000004
/**
+ * NSH (Network Service Header) packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x894F>
+ */
+#define RTE_PTYPE_L2_ETHER_NSH 0x00000005
+/**
* Mask of layer 2 packet types.
* It is used for outer packet for tunneling cases.
*/
@@ -765,7 +798,10 @@ struct rte_mbuf {
/*
* The packet type, which is the combination of outer/inner L2, L3, L4
- * and tunnel types.
+ * and tunnel types. The packet_type describes the data actually present
+ * in the mbuf. Example: if vlan stripping is enabled, a received vlan packet
+ * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
+ * vlan is stripped from the data.
*/
union {
uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
@@ -782,7 +818,8 @@ struct rte_mbuf {
uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
uint16_t data_len; /**< Amount of data in segment buffer. */
- uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order) */
+ /** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */
+ uint16_t vlan_tci;
union {
uint32_t rss; /**< RSS hash result if RSS enabled */
@@ -808,7 +845,8 @@ struct rte_mbuf {
uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
- uint16_t vlan_tci_outer; /**< Outer VLAN Tag Control Identifier (CPU order) */
+ /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
+ uint16_t vlan_tci_outer;
/* second cache line - fields only used in slow path or on TX */
MARKER cacheline1 __rte_cache_min_aligned;
@@ -846,6 +884,44 @@ struct rte_mbuf {
uint16_t timesync;
} __rte_cache_aligned;
+/**
+ * Prefetch the first part of the mbuf
+ *
+ * The first 64 bytes of the mbuf correspond to fields that are used early
+ * in the receive path. If the cache line of the architecture is larger than
+ * 64 bytes, the second part will also be prefetched.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ */
+static inline void
+rte_mbuf_prefetch_part1(struct rte_mbuf *m)
+{
+ rte_prefetch0(&m->cacheline0);
+}
+
+/**
+ * Prefetch the second part of the mbuf
+ *
+ * The next 64 bytes of the mbuf correspond to fields that are used in the
+ * transmit path. If the cache line of the architecture is larger than 64
+ * bytes, this function does nothing, as the full mbuf is expected to be
+ * already in cache.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ */
+static inline void
+rte_mbuf_prefetch_part2(struct rte_mbuf *m)
+{
+#if RTE_CACHE_LINE_SIZE == 64
+ rte_prefetch0(&m->cacheline1);
+#else
+ RTE_SET_USED(m);
+#endif
+}
+
+
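These helpers let a driver warm exactly the cache lines its fast path touches. A typical burst-loop pattern, sketched under the assumption of an rx_pkts[] array and a hypothetical per-packet handler:

	for (i = 0; i < nb_rx; i++)
		rte_mbuf_prefetch_part1(rx_pkts[i]); /* RX-path fields */

	for (i = 0; i < nb_rx; i++) {
		rte_mbuf_prefetch_part2(rx_pkts[i]); /* TX-path fields */
		handle_pkt(rx_pkts[i]);              /* hypothetical */
	}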
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
/**
@@ -936,29 +1012,11 @@ struct rte_pktmbuf_pool_private {
/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
-/** check mbuf type in debug mode if mbuf pointer is not null */
-#define __rte_mbuf_sanity_check_raw(m, is_h) do { \
- if ((m) != NULL) \
- rte_mbuf_sanity_check(m, is_h); \
-} while (0)
-
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
#else /* RTE_LIBRTE_MBUF_DEBUG */
/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
-/** check mbuf type in debug mode if mbuf pointer is not null */
-#define __rte_mbuf_sanity_check_raw(m, is_h) do { } while (0)
-
-/** MBUF asserts in debug mode */
-#define RTE_MBUF_ASSERT(exp) do { } while (0)
-
#endif /* RTE_LIBRTE_MBUF_DEBUG */
#ifdef RTE_MBUF_REFCNT_ATOMIC
@@ -1071,9 +1129,12 @@ void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
/**
- * @internal Allocate a new mbuf from mempool *mp*.
- * The use of that function is reserved for RTE internal needs.
- * Please use rte_pktmbuf_alloc().
+ * Allocate an uninitialized mbuf from mempool *mp*.
+ *
+ * This function can be used by PMDs (especially in RX functions) to
+ * allocate an uninitialized mbuf. The driver is responsible for
+ * initializing all the required fields. See rte_pktmbuf_reset().
+ * For standard needs, prefer rte_pktmbuf_alloc().
*
* @param mp
* The mempool from which mbuf is allocated.
@@ -1081,18 +1142,28 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
* - The pointer to the new mbuf on success.
* - NULL if allocation failed.
*/
-static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
struct rte_mbuf *m;
void *mb = NULL;
+
if (rte_mempool_get(mp, &mb) < 0)
return NULL;
m = (struct rte_mbuf *)mb;
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
rte_mbuf_refcnt_set(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
+
return m;
}
+/* compat with older versions */
+__rte_deprecated static inline struct rte_mbuf *
+__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+{
+ return rte_mbuf_raw_alloc(mp);
+}
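Making rte_mbuf_raw_alloc() public lets PMD RX paths skip the full rte_pktmbuf_reset(). A sketch of the resulting contract; the pool pointer is an assumption, and the field list is only the minimum a driver typically rewrites:

	struct rte_mbuf *m;

	m = rte_mbuf_raw_alloc(rxq_mb_pool); /* assumed RX queue pool */
	if (m == NULL)
		return 0;
	/* no reset was done: the driver owns every field it later reads */
	m->data_off = RTE_PKTMBUF_HEADROOM;
	m->nb_segs = 1;
	m->next = NULL;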
+
/**
* @internal Put mbuf back into its original mempool.
* The use of that function is reserved for RTE internal needs.
@@ -1104,7 +1175,7 @@ static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
static inline void __attribute__((always_inline))
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
rte_mempool_put(m->pool, m);
}
@@ -1356,7 +1427,7 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
struct rte_mbuf *m;
- if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
+ if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
rte_pktmbuf_reset(m);
return m;
}
@@ -1392,22 +1463,22 @@ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
switch (count % 4) {
case 0:
while (idx != count) {
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 3:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 2:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
case 1:
- RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
+ RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
rte_mbuf_refcnt_set(mbufs[idx], 1);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
@@ -1421,6 +1492,8 @@ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
*
* After attachment we refer the mbuf we attached as 'indirect',
* while mbuf we attached to as 'direct'.
+ * The direct mbuf's reference counter is incremented.
+ *
* Right now, not supported:
* - attachment for already indirect mbuf (e.g. - mi has to be direct).
* - mbuf we trying to attach (mi) is used by someone else
@@ -1435,7 +1508,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
struct rte_mbuf *md;
- RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(mi) &&
+ RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
rte_mbuf_refcnt_read(mi) == 1);
/* if m is not direct, get the mbuf that embeds the data */
@@ -1474,13 +1547,17 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
*
* - restore original mbuf address and length values.
* - reset pktmbuf data and data_len to their default values.
- * All other fields of the given packet mbuf will be left intact.
+ * - decrement the direct mbuf's reference counter. When the
+ * reference counter becomes 0, the direct mbuf is freed.
+ *
+ * All other fields of the given packet mbuf will be left intact.
*
* @param m
* The indirect attached packet mbuf.
*/
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
+ struct rte_mbuf *md = rte_mbuf_from_indirect(m);
struct rte_mempool *mp = m->pool;
uint32_t mbuf_size, buf_len, priv_size;
@@ -1495,6 +1572,9 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
m->data_len = 0;
m->ol_flags = 0;
+
+ if (rte_mbuf_refcnt_update(md, -1) == 0)
+ __rte_mbuf_raw_free(md);
}
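With this change the refcount bookkeeping is symmetric: rte_pktmbuf_attach() increments the direct mbuf, rte_pktmbuf_detach() decrements it and frees it at zero, and __rte_pktmbuf_prefree_seg() (below) only has to detach. A lifecycle sketch, assuming two pools:

	struct rte_mbuf *md = rte_pktmbuf_alloc(data_pool); /* direct */
	struct rte_mbuf *mi = rte_pktmbuf_alloc(hdr_pool);

	rte_pktmbuf_attach(mi, md); /* md refcnt 1 -> 2, mi now indirect */
	rte_pktmbuf_free(md);       /* md refcnt 2 -> 1, data stays valid */
	rte_pktmbuf_free(mi);       /* detach: md refcnt 1 -> 0, both freed */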
static inline struct rte_mbuf* __attribute__((always_inline))
@@ -1503,17 +1583,9 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
__rte_mbuf_sanity_check(m, 0);
if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {
-
- /* if this is an indirect mbuf, then
- * - detach mbuf
- * - free attached mbuf segment
- */
- if (RTE_MBUF_INDIRECT(m)) {
- struct rte_mbuf *md = rte_mbuf_from_indirect(m);
+ /* if this is an indirect mbuf, it is detached. */
+ if (RTE_MBUF_INDIRECT(m))
rte_pktmbuf_detach(m);
- if (rte_mbuf_refcnt_update(md, -1) == 0)
- __rte_mbuf_raw_free(md);
- }
return m;
}
return NULL;
diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index a6898eff..057a6ab4 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -38,13 +38,13 @@ CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
EXPORT_MAP := rte_mempool_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
-ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)
-SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_dom0_mempool.c
-endif
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_stack.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
diff --git a/lib/librte_mempool/rte_dom0_mempool.c b/lib/librte_mempool/rte_dom0_mempool.c
deleted file mode 100644
index 0d6d7504..00000000
--- a/lib/librte_mempool/rte_dom0_mempool.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_atomic.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_eal_memconfig.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_errno.h>
-#include <rte_string_fns.h>
-#include <rte_spinlock.h>
-
-#include "rte_mempool.h"
-
-static void
-get_phys_map(void *va, phys_addr_t pa[], uint32_t pg_num,
- uint32_t pg_sz, uint32_t memseg_id)
-{
- uint32_t i;
- uint64_t virt_addr, mfn_id;
- struct rte_mem_config *mcfg;
- uint32_t page_size = getpagesize();
-
- /* get pointer to global configuration */
- mcfg = rte_eal_get_configuration()->mem_config;
- virt_addr = (uintptr_t) mcfg->memseg[memseg_id].addr;
-
- for (i = 0; i != pg_num; i++) {
- mfn_id = ((uintptr_t)va + i * pg_sz - virt_addr) / RTE_PGSIZE_2M;
- pa[i] = mcfg->memseg[memseg_id].mfn[mfn_id] * page_size;
- }
-}
-
-/* create the mempool for supporting Dom0 */
-struct rte_mempool *
-rte_dom0_mempool_create(const char *name, unsigned elt_num, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags)
-{
- struct rte_mempool *mp = NULL;
- phys_addr_t *pa;
- char *va;
- size_t sz;
- uint32_t pg_num, pg_shift, pg_sz, total_size;
- const struct rte_memzone *mz;
- char mz_name[RTE_MEMZONE_NAMESIZE];
- int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
-
- pg_sz = RTE_PGSIZE_2M;
-
- pg_shift = rte_bsf32(pg_sz);
- total_size = rte_mempool_calc_obj_size(elt_size, flags, NULL);
-
- /* calc max memory size and max number of pages needed. */
- sz = rte_mempool_xmem_size(elt_num, total_size, pg_shift) +
- RTE_PGSIZE_2M;
- pg_num = sz >> pg_shift;
-
- /* extract physical mappings of the allocated memory. */
- pa = calloc(pg_num, sizeof (*pa));
- if (pa == NULL)
- return mp;
-
- snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME, name);
- mz = rte_memzone_reserve(mz_name, sz, socket_id, mz_flags);
- if (mz == NULL) {
- free(pa);
- return mp;
- }
-
- va = (char *)RTE_ALIGN_CEIL((uintptr_t)mz->addr, RTE_PGSIZE_2M);
- /* extract physical mappings of the allocated memory. */
- get_phys_map(va, pa, pg_num, pg_sz, mz->memseg_id);
-
- mp = rte_mempool_xmem_create(name, elt_num, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags, va, pa, pg_num, pg_shift);
-
- free(pa);
-
- return mp;
-}
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index f8781e17..d78d02b7 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 6WIND S.A.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,6 +40,7 @@
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
+#include <sys/mman.h>
#include <rte_common.h>
#include <rte_log.h>
@@ -127,127 +129,63 @@ static unsigned optimize_object_size(unsigned obj_size)
}
static void
-mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg)
+mempool_add_elem(struct rte_mempool *mp, void *obj, phys_addr_t physaddr)
{
struct rte_mempool_objhdr *hdr;
struct rte_mempool_objtlr *tlr __rte_unused;
- obj = (char *)obj + mp->header_size;
-
/* set mempool ptr in header */
hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
hdr->mp = mp;
+ hdr->physaddr = physaddr;
+ STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
+ mp->populated_size++;
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
tlr = __mempool_get_trailer(obj);
tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
#endif
- /* call the initializer */
- if (obj_init)
- obj_init(mp, obj_init_arg, obj, obj_idx);
/* enqueue in ring */
- rte_ring_sp_enqueue(mp->ring, obj);
+ rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
}
+/* call obj_cb() for each mempool element */
uint32_t
-rte_mempool_obj_iter(void *vaddr, uint32_t elt_num, size_t elt_sz, size_t align,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
- rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg)
+rte_mempool_obj_iter(struct rte_mempool *mp,
+ rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
{
- uint32_t i, j, k;
- uint32_t pgn, pgf;
- uintptr_t end, start, va;
- uintptr_t pg_sz;
-
- pg_sz = (uintptr_t)1 << pg_shift;
- va = (uintptr_t)vaddr;
-
- i = 0;
- j = 0;
-
- while (i != elt_num && j != pg_num) {
-
- start = RTE_ALIGN_CEIL(va, align);
- end = start + elt_sz;
-
- /* index of the first page for the next element. */
- pgf = (end >> pg_shift) - (start >> pg_shift);
-
- /* index of the last page for the current element. */
- pgn = ((end - 1) >> pg_shift) - (start >> pg_shift);
- pgn += j;
-
- /* do we have enough space left for the element. */
- if (pgn >= pg_num)
- break;
-
- for (k = j;
- k != pgn &&
- paddr[k] + pg_sz == paddr[k + 1];
- k++)
- ;
+ struct rte_mempool_objhdr *hdr;
+ void *obj;
+ unsigned n = 0;
- /*
- * if next pgn chunks of memory physically continuous,
- * use it to create next element.
- * otherwise, just skip that chunk unused.
- */
- if (k == pgn) {
- if (obj_iter != NULL)
- obj_iter(obj_iter_arg, (void *)start,
- (void *)end, i);
- va = end;
- j += pgf;
- i++;
- } else {
- va = RTE_ALIGN_CEIL((va + 1), pg_sz);
- j++;
- }
+ STAILQ_FOREACH(hdr, &mp->elt_list, next) {
+ obj = (char *)hdr + sizeof(*hdr);
+ obj_cb(mp, obj_cb_arg, obj, n);
+ n++;
}
- return i;
-}
-
-/*
- * Populate mempool with the objects.
- */
-
-struct mempool_populate_arg {
- struct rte_mempool *mp;
- rte_mempool_obj_ctor_t *obj_init;
- void *obj_init_arg;
-};
-
-static void
-mempool_obj_populate(void *arg, void *start, void *end, uint32_t idx)
-{
- struct mempool_populate_arg *pa = arg;
-
- mempool_add_elem(pa->mp, start, idx, pa->obj_init, pa->obj_init_arg);
- pa->mp->elt_va_end = (uintptr_t)end;
+ return n;
}
-static void
-mempool_populate(struct rte_mempool *mp, size_t num, size_t align,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg)
+/* call mem_cb() for each mempool memory chunk */
+uint32_t
+rte_mempool_mem_iter(struct rte_mempool *mp,
+ rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
{
- uint32_t elt_sz;
- struct mempool_populate_arg arg;
+ struct rte_mempool_memhdr *hdr;
+ unsigned n = 0;
- elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
- arg.mp = mp;
- arg.obj_init = obj_init;
- arg.obj_init_arg = obj_init_arg;
+ STAILQ_FOREACH(hdr, &mp->mem_list, next) {
+ mem_cb(mp, mem_cb_arg, hdr, n);
+ n++;
+ }
- mp->size = rte_mempool_obj_iter((void *)mp->elt_va_start,
- num, elt_sz, align,
- mp->elt_pa, mp->pg_num, mp->pg_shift,
- mempool_obj_populate, &arg);
+ return n;
}
+/* get the header, trailer and total size of a mempool element. */
uint32_t
rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
struct rte_mempool_objsz *sz)
@@ -256,24 +194,13 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
sz = (sz != NULL) ? sz : &lsz;
- /*
- * In header, we have at least the pointer to the pool, and
- * optionaly a 64 bits cookie.
- */
- sz->header_size = 0;
- sz->header_size += sizeof(struct rte_mempool *); /* ptr to pool */
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- sz->header_size += sizeof(uint64_t); /* cookie */
-#endif
+ sz->header_size = sizeof(struct rte_mempool_objhdr);
if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
RTE_MEMPOOL_ALIGN);
- /* trailer contains the cookie in debug mode */
- sz->trailer_size = 0;
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- sz->trailer_size += sizeof(uint64_t); /* cookie */
-#endif
+ sz->trailer_size = sizeof(struct rte_mempool_objtlr);
+
/* element size is 8 bytes-aligned at least */
sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
@@ -297,23 +224,6 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
sz->trailer_size = new_size - sz->header_size - sz->elt_size;
}
- if (! rte_eal_has_hugepages()) {
- /*
- * compute trailer size so that pool elements fit exactly in
- * a standard page
- */
- int page_size = getpagesize();
- int new_size = page_size - sz->header_size - sz->elt_size;
- if (new_size < 0 || (unsigned int)new_size < sz->trailer_size) {
- printf("When hugepages are disabled, pool objects "
- "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
- sz->header_size, sz->elt_size, sz->trailer_size,
- page_size);
- return 0;
- }
- sz->trailer_size = new_size;
- }
-
/* this is the size of an object, including header and trailer */
sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
@@ -325,139 +235,514 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
* Calculate maximum amount of memory required to store given number of objects.
*/
size_t
-rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz, uint32_t pg_shift)
+rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift)
{
- size_t n, pg_num, pg_sz, sz;
+ size_t obj_per_page, pg_num, pg_sz;
- pg_sz = (size_t)1 << pg_shift;
+ if (total_elt_sz == 0)
+ return 0;
- if ((n = pg_sz / elt_sz) > 0) {
- pg_num = (elt_num + n - 1) / n;
- sz = pg_num << pg_shift;
- } else {
- sz = RTE_ALIGN_CEIL(elt_sz, pg_sz) * elt_num;
- }
+ if (pg_shift == 0)
+ return total_elt_sz * elt_num;
- return sz;
+ pg_sz = (size_t)1 << pg_shift;
+ obj_per_page = pg_sz / total_elt_sz;
+ if (obj_per_page == 0)
+ return RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * elt_num;
+
+ pg_num = (elt_num + obj_per_page - 1) / obj_per_page;
+ return pg_num << pg_shift;
}
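The rewritten size computation first asks how many whole objects fit in a page and only page-aligns each object when a single one is larger than a page. A worked example of the common case:

	/* 1000 objects of 1536 B, 4 KiB pages (pg_shift = 12):
	 *   obj_per_page = 4096 / 1536 = 2
	 *   pg_num = (1000 + 2 - 1) / 2 = 500
	 *   result = 500 << 12 = 2048000 bytes
	 */
	size_t sz = rte_mempool_xmem_size(1000, 1536, 12);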
/*
* Calculate how much memory would be actually required with the
* given memory footprint to store required number of elements.
*/
+ssize_t
+rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
+ size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
+ uint32_t pg_shift)
+{
+ uint32_t elt_cnt = 0;
+ phys_addr_t start, end;
+ uint32_t paddr_idx;
+ size_t pg_sz = (size_t)1 << pg_shift;
+
+ /* if paddr is NULL, assume contiguous memory */
+ if (paddr == NULL) {
+ start = 0;
+ end = pg_sz * pg_num;
+ paddr_idx = pg_num;
+ } else {
+ start = paddr[0];
+ end = paddr[0] + pg_sz;
+ paddr_idx = 1;
+ }
+ while (elt_cnt < elt_num) {
+
+ if (end - start >= total_elt_sz) {
+ /* enough contiguous memory, add an object */
+ start += total_elt_sz;
+ elt_cnt++;
+ } else if (paddr_idx < pg_num) {
+ /* no room to store one obj, add a page */
+ if (end == paddr[paddr_idx]) {
+ end += pg_sz;
+ } else {
+ start = paddr[paddr_idx];
+ end = paddr[paddr_idx] + pg_sz;
+ }
+ paddr_idx++;
+
+ } else {
+ /* no more page, return how many elements fit */
+ return -(size_t)elt_cnt;
+ }
+ }
+
+ return (size_t)paddr_idx << pg_shift;
+}
+
+/* free a memchunk allocated with rte_memzone_reserve() */
static void
-mempool_lelem_iter(void *arg, __rte_unused void *start, void *end,
- __rte_unused uint32_t idx)
+rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
+ void *opaque)
{
- *(uintptr_t *)arg = (uintptr_t)end;
+ const struct rte_memzone *mz = opaque;
+ rte_memzone_free(mz);
}
-ssize_t
-rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+/* Free memory chunks used by a mempool. Objects must be in pool */
+static void
+rte_mempool_free_memchunks(struct rte_mempool *mp)
{
- uint32_t n;
- uintptr_t va, uv;
- size_t pg_sz, usz;
+ struct rte_mempool_memhdr *memhdr;
+ void *elt;
+
+ while (!STAILQ_EMPTY(&mp->elt_list)) {
+ rte_mempool_ops_dequeue_bulk(mp, &elt, 1);
+ (void)elt;
+ STAILQ_REMOVE_HEAD(&mp->elt_list, next);
+ mp->populated_size--;
+ }
- pg_sz = (size_t)1 << pg_shift;
- va = (uintptr_t)vaddr;
- uv = va;
+ while (!STAILQ_EMPTY(&mp->mem_list)) {
+ memhdr = STAILQ_FIRST(&mp->mem_list);
+ STAILQ_REMOVE_HEAD(&mp->mem_list, next);
+ if (memhdr->free_cb != NULL)
+ memhdr->free_cb(memhdr, memhdr->opaque);
+ rte_free(memhdr);
+ mp->nb_mem_chunks--;
+ }
+}
+
+/* Add objects in the pool, using a physically contiguous memory
+ * zone. Return the number of objects added, or a negative value
+ * on error.
+ */
+int
+rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+ phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ unsigned total_elt_sz;
+ unsigned i = 0;
+ size_t off;
+ struct rte_mempool_memhdr *memhdr;
+ int ret;
+
+ /* create the internal ring if not already done */
+ if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
+ ret = rte_mempool_ops_alloc(mp);
+ if (ret != 0)
+ return ret;
+ mp->flags |= MEMPOOL_F_POOL_CREATED;
+ }
+
+ /* mempool is already populated */
+ if (mp->populated_size >= mp->size)
+ return -ENOSPC;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- if ((n = rte_mempool_obj_iter(vaddr, elt_num, elt_sz, 1,
- paddr, pg_num, pg_shift, mempool_lelem_iter,
- &uv)) != elt_num) {
- return -(ssize_t)n;
+ memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
+ if (memhdr == NULL)
+ return -ENOMEM;
+
+ memhdr->mp = mp;
+ memhdr->addr = vaddr;
+ memhdr->phys_addr = paddr;
+ memhdr->len = len;
+ memhdr->free_cb = free_cb;
+ memhdr->opaque = opaque;
+
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
+ else
+ off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
+
+ while (off + total_elt_sz <= len && mp->populated_size < mp->size) {
+ off += mp->header_size;
+ if (paddr == RTE_BAD_PHYS_ADDR)
+ mempool_add_elem(mp, (char *)vaddr + off,
+ RTE_BAD_PHYS_ADDR);
+ else
+ mempool_add_elem(mp, (char *)vaddr + off, paddr + off);
+ off += mp->elt_size + mp->trailer_size;
+ i++;
}
- uv = RTE_ALIGN_CEIL(uv, pg_sz);
- usz = uv - va;
- return usz;
+ /* not enough room to store one object */
+ if (i == 0)
+ return -EINVAL;
+
+ STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
+ mp->nb_mem_chunks++;
+ return i;
}
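rte_mempool_populate_phys() is the primitive the other populate variants reduce to: one virtually and physically contiguous chunk plus a teardown callback. A sketch feeding it a fresh memzone; mp and chunk_len are assumptions, and the callback is user-supplied here because the mz-freeing one above is static to this file:

	static void
	chunk_free(struct rte_mempool_memhdr *memhdr __rte_unused, void *opaque)
	{
		rte_memzone_free(opaque); /* opaque carries the memzone */
	}

	const struct rte_memzone *mz;
	int ret;

	mz = rte_memzone_reserve("pool_chunk", chunk_len, SOCKET_ID_ANY, 0);
	if (mz == NULL)
		return -rte_errno;
	ret = rte_mempool_populate_phys(mp, mz->addr, mz->phys_addr,
		mz->len, chunk_free, (void *)(uintptr_t)mz);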
-#ifndef RTE_LIBRTE_XEN_DOM0
-/* stub if DOM0 support not configured */
-struct rte_mempool *
-rte_dom0_mempool_create(const char *name __rte_unused,
- unsigned n __rte_unused,
- unsigned elt_size __rte_unused,
- unsigned cache_size __rte_unused,
- unsigned private_data_size __rte_unused,
- rte_mempool_ctor_t *mp_init __rte_unused,
- void *mp_init_arg __rte_unused,
- rte_mempool_obj_ctor_t *obj_init __rte_unused,
- void *obj_init_arg __rte_unused,
- int socket_id __rte_unused,
- unsigned flags __rte_unused)
-{
- rte_errno = EINVAL;
- return NULL;
+/* Add objects in the pool, using a table of physical pages. Return the
+ * number of objects added, or a negative value on error.
+ */
+int
+rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
+{
+ uint32_t i, n;
+ int ret, cnt = 0;
+ size_t pg_sz = (size_t)1 << pg_shift;
+
+ /* mempool must not be populated */
+ if (mp->nb_mem_chunks != 0)
+ return -EEXIST;
+
+ if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+ return rte_mempool_populate_phys(mp, vaddr, RTE_BAD_PHYS_ADDR,
+ pg_num * pg_sz, free_cb, opaque);
+
+ for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
+
+ /* populate with the largest group of contiguous pages */
+ for (n = 1; (i + n) < pg_num &&
+ paddr[i] + pg_sz == paddr[i+n]; n++)
+ ;
+
+ ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
+ paddr[i], n * pg_sz, free_cb, opaque);
+ if (ret < 0) {
+ rte_mempool_free_memchunks(mp);
+ return ret;
+ }
+ /* no need to call the free callback for next chunks */
+ free_cb = NULL;
+ cnt += ret;
+ }
+ return cnt;
}
-#endif
-/* create the mempool */
-struct rte_mempool *
-rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags)
-{
- if (rte_xen_dom0_supported())
- return rte_dom0_mempool_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags);
- else
- return rte_mempool_xmem_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags,
- NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
- MEMPOOL_PG_SHIFT_MAX);
+/* Populate the mempool with a virtual area. Return the number of
+ * objects added, or a negative value on error.
+ */
+int
+rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque)
+{
+ phys_addr_t paddr;
+ size_t off, phys_len;
+ int ret, cnt = 0;
+
+ /* mempool must not be populated */
+ if (mp->nb_mem_chunks != 0)
+ return -EEXIST;
+ /* address and len must be page-aligned */
+ if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
+ return -EINVAL;
+ if (RTE_ALIGN_CEIL(len, pg_sz) != len)
+ return -EINVAL;
+
+ if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+ return rte_mempool_populate_phys(mp, addr, RTE_BAD_PHYS_ADDR,
+ len, free_cb, opaque);
+
+ for (off = 0; off + pg_sz <= len &&
+ mp->populated_size < mp->size; off += phys_len) {
+
+ paddr = rte_mem_virt2phy(addr + off);
+ /* required for xen_dom0 to get the machine address */
+ paddr = rte_mem_phy2mch(-1, paddr);
+
+ if (paddr == RTE_BAD_PHYS_ADDR) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* populate with the largest group of contiguous pages */
+ for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
+ phys_addr_t paddr_tmp;
+
+ paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
+ paddr_tmp = rte_mem_phy2mch(-1, paddr_tmp);
+
+ if (paddr_tmp != paddr + phys_len)
+ break;
+ }
+
+ ret = rte_mempool_populate_phys(mp, addr + off, paddr,
+ phys_len, free_cb, opaque);
+ if (ret < 0)
+ goto fail;
+ /* no need to call the free callback for next chunks */
+ free_cb = NULL;
+ cnt += ret;
+ }
+
+ return cnt;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
+
+/* Default function to populate the mempool: allocate memory in memzones,
+ * and populate them. Return the number of objects added, or a negative
+ * value on error.
+ */
+int
+rte_mempool_populate_default(struct rte_mempool *mp)
+{
+ int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ size_t size, total_elt_sz, align, pg_sz, pg_shift;
+ phys_addr_t paddr;
+ unsigned mz_id, n;
+ int ret;
+
+ /* mempool must not be populated */
+ if (mp->nb_mem_chunks != 0)
+ return -EEXIST;
+
+ if (rte_eal_has_hugepages()) {
+ pg_shift = 0; /* not needed, zone is physically contiguous */
+ pg_sz = 0;
+ align = RTE_CACHE_LINE_SIZE;
+ } else {
+ pg_sz = getpagesize();
+ pg_shift = rte_bsf32(pg_sz);
+ align = pg_sz;
+ }
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
+ size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift);
+
+ ret = snprintf(mz_name, sizeof(mz_name),
+ RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ ret = -ENAMETOOLONG;
+ goto fail;
+ }
+
+ mz = rte_memzone_reserve_aligned(mz_name, size,
+ mp->socket_id, mz_flags, align);
+ /* not enough memory, retry with the biggest zone we have */
+ if (mz == NULL)
+ mz = rte_memzone_reserve_aligned(mz_name, 0,
+ mp->socket_id, mz_flags, align);
+ if (mz == NULL) {
+ ret = -rte_errno;
+ goto fail;
+ }
+
+ if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+ paddr = RTE_BAD_PHYS_ADDR;
+ else
+ paddr = mz->phys_addr;
+
+ if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
+ ret = rte_mempool_populate_phys(mp, mz->addr,
+ paddr, mz->len,
+ rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ else
+ ret = rte_mempool_populate_virt(mp, mz->addr,
+ mz->len, pg_sz,
+ rte_mempool_memchunk_mz_free,
+ (void *)(uintptr_t)mz);
+ if (ret < 0)
+ goto fail;
+ }
+
+ return mp->size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return ret;
+}
+
+/* return the memory size required for mempool objects in anonymous mem */
+static size_t
+get_anon_size(const struct rte_mempool *mp)
+{
+ size_t size, total_elt_sz, pg_sz, pg_shift;
+
+ pg_sz = getpagesize();
+ pg_shift = rte_bsf32(pg_sz);
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift);
+
+ return size;
+}
+
+/* unmap a memory zone mapped by rte_mempool_populate_anon() */
+static void
+rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
+ void *opaque)
+{
+ munmap(opaque, get_anon_size(memhdr->mp));
+}
+
+/* populate the mempool with an anonymous mapping */
+int
+rte_mempool_populate_anon(struct rte_mempool *mp)
+{
+ size_t size;
+ int ret;
+ char *addr;
+
+ /* mempool is already populated, error */
+ if (!STAILQ_EMPTY(&mp->mem_list)) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ /* get chunk of virtually continuous memory */
+ size = get_anon_size(mp);
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ rte_errno = errno;
+ return 0;
+ }
+ /* can't use MAP_LOCKED, it does not exist on BSD */
+ if (mlock(addr, size) < 0) {
+ rte_errno = errno;
+ munmap(addr, size);
+ return 0;
+ }
+
+ ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(),
+ rte_mempool_memchunk_anon_free, addr);
+ if (ret == 0)
+ goto fail;
+
+ return mp->populated_size;
+
+ fail:
+ rte_mempool_free_memchunks(mp);
+ return 0;
+}
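rte_mempool_populate_anon() backs the pool with a locked anonymous mapping, which pairs naturally with MEMPOOL_F_NO_PHYS_CONTIG pools that never need physical addresses. A sketch, noting that the function returns 0 on failure with rte_errno set; elt_size is an assumption:

	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("anon_pool", 1024, elt_size,
		0, 0, SOCKET_ID_ANY, MEMPOOL_F_NO_PHYS_CONTIG);
	if (mp == NULL)
		return NULL;
	rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
	if (rte_mempool_populate_anon(mp) == 0) {
		rte_mempool_free(mp);
		return NULL;
	}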
+
+/* free a mempool */
+void
+rte_mempool_free(struct rte_mempool *mp)
+{
+ struct rte_mempool_list *mempool_list = NULL;
+ struct rte_tailq_entry *te;
+
+ if (mp == NULL)
+ return;
+
+ mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, mempool_list, next) {
+ if (te->data == (void *)mp)
+ break;
+ }
+
+ if (te != NULL) {
+ TAILQ_REMOVE(mempool_list, te, next);
+ rte_free(te);
+ }
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_mempool_free_memchunks(mp);
+ rte_mempool_ops_free(mp);
+ rte_memzone_free(mp->mz);
+}
+
+static void
+mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
+{
+ cache->size = size;
+ cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
+ cache->len = 0;
}
/*
- * Create the mempool over already allocated chunk of memory.
- * That external memory buffer can consists of physically disjoint pages.
- * Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
- * */
+ * Create and initialize a cache for objects that are retrieved from and
+ * returned to an underlying mempool. This structure is identical to the
+ * local_cache[lcore_id] pointed to by the mempool structure.
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size, int socket_id)
+{
+ struct rte_mempool_cache *cache;
+
+ if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (cache == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache.\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ mempool_cache_init(cache, size);
+
+ return cache;
+}
+
+/*
+ * Free a cache. It's the responsibility of the user to make sure that any
+ * remaining objects in the cache are flushed to the corresponding
+ * mempool.
+ */
+void
+rte_mempool_cache_free(struct rte_mempool_cache *cache)
+{
+ rte_free(cache);
+}
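rte_mempool_cache_create() turns the per-lcore cache into a user-owned object, so threads that are not mapped to a DPDK lcore can still batch their accesses. Only the create/free calls below come from this patch; how the cache is handed to get/put is left out because that API surface is not shown here:

	struct rte_mempool_cache *cache;

	cache = rte_mempool_cache_create(256, rte_socket_id());
	if (cache == NULL)
		return -rte_errno;
	/* ... use the cache with the get/put variants that accept one ... */
	rte_mempool_cache_free(cache); /* flush remaining objects first */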
+
+/* create an empty mempool */
struct rte_mempool *
-rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags, void *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ int socket_id, unsigned flags)
{
char mz_name[RTE_MEMZONE_NAMESIZE];
- char rg_name[RTE_RING_NAMESIZE];
struct rte_mempool_list *mempool_list;
struct rte_mempool *mp = NULL;
struct rte_tailq_entry *te = NULL;
- struct rte_ring *r = NULL;
- const struct rte_memzone *mz;
+ const struct rte_memzone *mz = NULL;
size_t mempool_size;
int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
- int rg_flags = 0;
- void *obj;
struct rte_mempool_objsz objsz;
- void *startaddr;
- int page_size = getpagesize();
+ unsigned lcore_id;
+ int ret;
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
RTE_CACHE_LINE_MASK) != 0);
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
RTE_CACHE_LINE_MASK) != 0);
@@ -474,28 +759,10 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
return NULL;
}
- /* check that we have both VA and PA */
- if (vaddr != NULL && paddr == NULL) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- /* Check that pg_num and pg_shift parameters are valid. */
- if (pg_num < RTE_DIM(mp->elt_pa) || pg_shift > MEMPOOL_PG_SHIFT_MAX) {
- rte_errno = EINVAL;
- return NULL;
- }
-
/* "no cache align" imply "no spread" */
if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
flags |= MEMPOOL_F_NO_SPREAD;
- /* ring flags */
- if (flags & MEMPOOL_F_SP_PUT)
- rg_flags |= RING_F_SP_ENQ;
- if (flags & MEMPOOL_F_SC_GET)
- rg_flags |= RING_F_SC_DEQ;
-
/* calculate mempool object sizes. */
if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
rte_errno = EINVAL;
@@ -504,15 +771,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);
- /* allocate the ring that will be used to store objects */
- /* Ring functions will return appropriate errors if we are
- * running as a secondary process etc., so no checks made
- * in this function for that condition */
- snprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, name);
- r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
- if (r == NULL)
- goto exit_unlock;
-
/*
* reserve a memory zone for this mempool: private data is
* cache-aligned
@@ -520,17 +778,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
private_data_size = (private_data_size +
RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
- if (! rte_eal_has_hugepages()) {
- /*
- * expand private data size to a whole page, so that the
- * first pool element will start on a new standard page
- */
- int head = sizeof(struct rte_mempool);
- int new_size = (private_data_size + head) % page_size;
- if (new_size) {
- private_data_size += page_size - new_size;
- }
- }
/* try to allocate tailq entry */
te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
@@ -539,89 +786,57 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
goto exit_unlock;
}
- /*
- * If user provided an external memory buffer, then use it to
- * store mempool objects. Otherwise reserve a memzone that is large
- * enough to hold mempool header and metadata plus mempool objects.
- */
- mempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;
+ mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size);
+ mempool_size += private_data_size;
mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
- if (vaddr == NULL)
- mempool_size += (size_t)objsz.total_size * n;
- if (! rte_eal_has_hugepages()) {
- /*
- * we want the memory pool to start on a page boundary,
- * because pool elements crossing page boundaries would
- * result in discontiguous physical addresses
- */
- mempool_size += page_size;
+ ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ rte_errno = ENAMETOOLONG;
+ goto exit_unlock;
}
- snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
-
mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
if (mz == NULL)
goto exit_unlock;
- if (rte_eal_has_hugepages()) {
- startaddr = (void*)mz->addr;
- } else {
- /* align memory pool start address on a page boundary */
- unsigned long addr = (unsigned long)mz->addr;
- if (addr & (page_size - 1)) {
- addr += page_size;
- addr &= ~(page_size - 1);
- }
- startaddr = (void*)addr;
- }
-
/* init the mempool structure */
- mp = startaddr;
- memset(mp, 0, sizeof(*mp));
- snprintf(mp->name, sizeof(mp->name), "%s", name);
- mp->phys_addr = mz->phys_addr;
- mp->ring = r;
+ mp = mz->addr;
+ memset(mp, 0, MEMPOOL_HEADER_SIZE(mp, cache_size));
+ ret = snprintf(mp->name, sizeof(mp->name), "%s", name);
+ if (ret < 0 || ret >= (int)sizeof(mp->name)) {
+ rte_errno = ENAMETOOLONG;
+ goto exit_unlock;
+ }
+ mp->mz = mz;
+ mp->socket_id = socket_id;
mp->size = n;
mp->flags = flags;
mp->elt_size = objsz.elt_size;
mp->header_size = objsz.header_size;
mp->trailer_size = objsz.trailer_size;
+ /* Size of default caches, zero means disabled. */
mp->cache_size = cache_size;
- mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
mp->private_data_size = private_data_size;
+ STAILQ_INIT(&mp->elt_list);
+ STAILQ_INIT(&mp->mem_list);
- /* calculate address of the first element for continuous mempool. */
- obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
- private_data_size;
- obj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);
-
- /* populate address translation fields. */
- mp->pg_num = pg_num;
- mp->pg_shift = pg_shift;
- mp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));
-
- /* mempool elements allocated together with mempool */
- if (vaddr == NULL) {
- mp->elt_va_start = (uintptr_t)obj;
- mp->elt_pa[0] = mp->phys_addr +
- (mp->elt_va_start - (uintptr_t)mp);
+ /*
+ * local_cache pointer is set even if cache_size is zero.
+ * The local_cache points to just past the elt_pa[] array.
+ */
+ mp->local_cache = (struct rte_mempool_cache *)
+ RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0));
- /* mempool elements in a separate chunk of memory. */
- } else {
- mp->elt_va_start = (uintptr_t)vaddr;
- memcpy(mp->elt_pa, paddr, sizeof (mp->elt_pa[0]) * pg_num);
+ /* Init all default caches. */
+ if (cache_size != 0) {
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+ mempool_cache_init(&mp->local_cache[lcore_id],
+ cache_size);
}
- mp->elt_va_end = mp->elt_va_start;
-
- /* call the initializer */
- if (mp_init)
- mp_init(mp, mp_init_arg);
-
- mempool_populate(mp, n, 1, obj_init, obj_init_arg);
-
- te->data = (void *) mp;
+ te->data = mp;
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
TAILQ_INSERT_TAIL(mempool_list, te, next);
@@ -632,30 +847,132 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
exit_unlock:
rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);
- rte_ring_free(r);
rte_free(te);
+ rte_mempool_free(mp);
+ return NULL;
+}
+
+/* create the mempool */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags)
+{
+ struct rte_mempool *mp;
+
+ mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+ private_data_size, socket_id, flags);
+ if (mp == NULL)
+ return NULL;
+
+ /*
+ * Since we have 4 combinations of SP/SC/MP/MC, examine both flags to
+ * pick the matching ring-based ops.
+ */
+ if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+ rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
+ else if (flags & MEMPOOL_F_SP_PUT)
+ rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
+ else if (flags & MEMPOOL_F_SC_GET)
+ rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
+ else
+ rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+
+ /* call the mempool priv initializer */
+ if (mp_init)
+ mp_init(mp, mp_init_arg);
+
+ if (rte_mempool_populate_default(mp) < 0)
+ goto fail;
+
+ /* call the object initializers */
+ if (obj_init)
+ rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
+
+ return mp;
+ fail:
+ rte_mempool_free(mp);
+ return NULL;
+}
+
+/*
+ * Create the mempool over an already allocated chunk of memory.
+ * That external memory buffer can consist of physically disjoint pages.
+ * Setting vaddr to NULL makes the mempool fall back to the original
+ * behaviour and allocate space for the mempool and its elements as one
+ * big chunk of physically contiguous memory.
+ */
+struct rte_mempool *
+rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
+ int socket_id, unsigned flags, void *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)
+{
+ struct rte_mempool *mp = NULL;
+ int ret;
+
+ /* no virtual address supplied, use rte_mempool_create() */
+ if (vaddr == NULL)
+ return rte_mempool_create(name, n, elt_size, cache_size,
+ private_data_size, mp_init, mp_init_arg,
+ obj_init, obj_init_arg, socket_id, flags);
+
+ /* check that we have both VA and PA */
+ if (paddr == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Check that pg_shift parameter is valid. */
+ if (pg_shift > MEMPOOL_PG_SHIFT_MAX) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
+ private_data_size, socket_id, flags);
+ if (mp == NULL)
+ return NULL;
+
+ /* call the mempool priv initializer */
+ if (mp_init)
+ mp_init(mp, mp_init_arg);
+
+ ret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift,
+ NULL, NULL);
+ if (ret < 0 || ret != (int)mp->size)
+ goto fail;
+
+ /* call the object initializers */
+ if (obj_init)
+ rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
+
+ return mp;
+
+ fail:
+ rte_mempool_free(mp);
return NULL;
}
/* Return the number of entries in the mempool */
-unsigned
-rte_mempool_count(const struct rte_mempool *mp)
+unsigned int
+rte_mempool_avail_count(const struct rte_mempool *mp)
{
unsigned count;
+ unsigned lcore_id;
- count = rte_ring_count(mp->ring);
+ count = rte_mempool_ops_get_count(mp);
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- {
- unsigned lcore_id;
- if (mp->cache_size == 0)
- return count;
+ if (mp->cache_size == 0)
+ return count;
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
- count += mp->local_cache[lcore_id].len;
- }
-#endif
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+ count += mp->local_cache[lcore_id].len;
/*
* due to race condition (access to len is not locked), the
@@ -666,116 +983,168 @@ rte_mempool_count(const struct rte_mempool *mp)
return count;
}
+/* return the number of entries allocated from the mempool */
+unsigned int
+rte_mempool_in_use_count(const struct rte_mempool *mp)
+{
+ return mp->size - rte_mempool_avail_count(mp);
+}
+
+unsigned int
+rte_mempool_count(const struct rte_mempool *mp)
+{
+ return rte_mempool_avail_count(mp);
+}
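rte_mempool_count() survives only as a compatibility wrapper; the new pair partitions the pool, so the two values sum to mp->size up to the documented cache race:

	printf("pool %s: %u available, %u in use (size %u)\n",
		mp->name,
		rte_mempool_avail_count(mp),
		rte_mempool_in_use_count(mp),
		mp->size);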
+
/* dump the cache status */
static unsigned
rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
{
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
unsigned lcore_id;
unsigned count = 0;
unsigned cache_count;
- fprintf(f, " cache infos:\n");
+ fprintf(f, " internal cache infos:\n");
fprintf(f, " cache_size=%"PRIu32"\n", mp->cache_size);
+
+ if (mp->cache_size == 0)
+ return count;
+
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
cache_count = mp->local_cache[lcore_id].len;
- fprintf(f, " cache_count[%u]=%u\n", lcore_id, cache_count);
+ fprintf(f, " cache_count[%u]=%"PRIu32"\n",
+ lcore_id, cache_count);
count += cache_count;
}
fprintf(f, " total_cache_count=%u\n", count);
return count;
-#else
- RTE_SET_USED(mp);
- fprintf(f, " cache disabled\n");
- return 0;
-#endif
}
-#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-/* check cookies before and after objects */
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
-struct mempool_audit_arg {
- const struct rte_mempool *mp;
- uintptr_t obj_end;
- uint32_t obj_num;
-};
-
-static void
-mempool_obj_audit(void *arg, void *start, void *end, uint32_t idx)
+/* check and update cookies or panic (internal) */
+void rte_mempool_check_cookies(const struct rte_mempool *mp,
+ void * const *obj_table_const, unsigned n, int free)
{
- struct mempool_audit_arg *pa = arg;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mempool_objtlr *tlr;
+ uint64_t cookie;
+ void *tmp;
void *obj;
-
- obj = (char *)start + pa->mp->header_size;
- pa->obj_end = (uintptr_t)end;
- pa->obj_num = idx + 1;
- __mempool_check_cookies(pa->mp, &obj, 1, 2);
+ void **obj_table;
+
+ /* Force dropping the "const" attribute. This is done only when
+ * DEBUG is enabled. */
+ tmp = (void *) obj_table_const;
+ obj_table = (void **) tmp;
+
+ while (n--) {
+ obj = obj_table[n];
+
+ if (rte_mempool_from_obj(obj) != mp)
+ rte_panic("MEMPOOL: object is owned by another "
+ "mempool\n");
+
+ hdr = __mempool_get_header(obj);
+ cookie = hdr->cookie;
+
+ if (free == 0) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (put)\n");
+ }
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
+ } else if (free == 1) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (get)\n");
+ }
+ hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
+ } else if (free == 2) {
+ if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
+ cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
+ rte_panic("MEMPOOL: bad header cookie (audit)\n");
+ }
+ }
+ tlr = __mempool_get_trailer(obj);
+ cookie = tlr->cookie;
+ if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
+ RTE_LOG(CRIT, MEMPOOL,
+ "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
+ obj, (const void *) mp, cookie);
+ rte_panic("MEMPOOL: bad trailer cookie\n");
+ }
+ }
+#else
+ RTE_SET_USED(mp);
+ RTE_SET_USED(obj_table_const);
+ RTE_SET_USED(n);
+ RTE_SET_USED(free);
+#endif
}
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
static void
-mempool_audit_cookies(const struct rte_mempool *mp)
+mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
+ void *obj, __rte_unused unsigned idx)
{
- uint32_t elt_sz, num;
- struct mempool_audit_arg arg;
-
- elt_sz = mp->elt_size + mp->header_size + mp->trailer_size;
-
- arg.mp = mp;
- arg.obj_end = mp->elt_va_start;
- arg.obj_num = 0;
+ __mempool_check_cookies(mp, &obj, 1, 2);
+}
- num = rte_mempool_obj_iter((void *)mp->elt_va_start,
- mp->size, elt_sz, 1,
- mp->elt_pa, mp->pg_num, mp->pg_shift,
- mempool_obj_audit, &arg);
+static void
+mempool_audit_cookies(struct rte_mempool *mp)
+{
+ unsigned num;
+ num = rte_mempool_obj_iter(mp, mempool_obj_audit, NULL);
if (num != mp->size) {
- rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
+ rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
"iterated only over %u elements\n",
mp, mp->size, num);
- } else if (arg.obj_end != mp->elt_va_end || arg.obj_num != mp->size) {
- rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
- "last callback va_end: %#tx (%#tx expeceted), "
- "num of objects: %u (%u expected)\n",
- mp, mp->size,
- arg.obj_end, mp->elt_va_end,
- arg.obj_num, mp->size);
}
}
+#else
+#define mempool_audit_cookies(mp) do {} while(0)
+#endif
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic error "-Wcast-qual"
#endif
-#else
-#define mempool_audit_cookies(mp) do {} while(0)
-#endif
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
/* check cookies before and after objects */
static void
mempool_audit_cache(const struct rte_mempool *mp)
{
/* check cache size consistency */
unsigned lcore_id;
+
+ if (mp->cache_size == 0)
+ return;
+
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- if (mp->local_cache[lcore_id].len > mp->cache_flushthresh) {
+ const struct rte_mempool_cache *cache;
+ cache = &mp->local_cache[lcore_id];
+ if (cache->len > cache->flushthresh) {
RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
lcore_id);
rte_panic("MEMPOOL: invalid cache len\n");
}
}
}
-#else
-#define mempool_audit_cache(mp) do {} while(0)
-#endif
-
/* check the consistency of mempool (size, cookies, ...) */
void
-rte_mempool_audit(const struct rte_mempool *mp)
+rte_mempool_audit(struct rte_mempool *mp)
{
mempool_audit_cache(mp);
mempool_audit_cookies(mp);
@@ -786,23 +1155,27 @@ rte_mempool_audit(const struct rte_mempool *mp)
/* dump the status of the mempool on the console */
void
-rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
+rte_mempool_dump(FILE *f, struct rte_mempool *mp)
{
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
struct rte_mempool_debug_stats sum;
unsigned lcore_id;
#endif
+ struct rte_mempool_memhdr *memhdr;
unsigned common_count;
unsigned cache_count;
+ size_t mem_len = 0;
- RTE_VERIFY(f != NULL);
- RTE_VERIFY(mp != NULL);
+ RTE_ASSERT(f != NULL);
+ RTE_ASSERT(mp != NULL);
fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
fprintf(f, " flags=%x\n", mp->flags);
- fprintf(f, " ring=<%s>@%p\n", mp->ring->name, mp->ring);
- fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->phys_addr);
+ fprintf(f, " pool=%p\n", mp->pool_data);
+ fprintf(f, " phys_addr=0x%" PRIx64 "\n", mp->mz->phys_addr);
+ fprintf(f, " nb_mem_chunks=%u\n", mp->nb_mem_chunks);
fprintf(f, " size=%"PRIu32"\n", mp->size);
+ fprintf(f, " populated_size=%"PRIu32"\n", mp->populated_size);
fprintf(f, " header_size=%"PRIu32"\n", mp->header_size);
fprintf(f, " elt_size=%"PRIu32"\n", mp->elt_size);
fprintf(f, " trailer_size=%"PRIu32"\n", mp->trailer_size);
@@ -810,20 +1183,16 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
mp->header_size + mp->elt_size + mp->trailer_size);
fprintf(f, " private_data_size=%"PRIu32"\n", mp->private_data_size);
- fprintf(f, " pg_num=%"PRIu32"\n", mp->pg_num);
- fprintf(f, " pg_shift=%"PRIu32"\n", mp->pg_shift);
- fprintf(f, " pg_mask=%#tx\n", mp->pg_mask);
- fprintf(f, " elt_va_start=%#tx\n", mp->elt_va_start);
- fprintf(f, " elt_va_end=%#tx\n", mp->elt_va_end);
- fprintf(f, " elt_pa[0]=0x%" PRIx64 "\n", mp->elt_pa[0]);
-
- if (mp->size != 0)
+
+ STAILQ_FOREACH(memhdr, &mp->mem_list, next)
+ mem_len += memhdr->len;
+ if (mem_len != 0) {
fprintf(f, " avg bytes/object=%#Lf\n",
- (long double)(mp->elt_va_end - mp->elt_va_start) /
- mp->size);
+ (long double)mem_len / mp->size);
+ }
cache_count = rte_mempool_dump_cache(f, mp);
- common_count = rte_ring_count(mp->ring);
+ common_count = rte_mempool_ops_get_count(mp);
if ((cache_count + common_count) > mp->size)
common_count = mp->size - cache_count;
fprintf(f, " common_pool_count=%u\n", common_count);
@@ -857,7 +1226,7 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)
void
rte_mempool_list_dump(FILE *f)
{
- const struct rte_mempool *mp = NULL;
+ struct rte_mempool *mp = NULL;
struct rte_tailq_entry *te;
struct rte_mempool_list *mempool_list;
@@ -901,7 +1270,7 @@ rte_mempool_lookup(const char *name)
return mp;
}
-void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),
+void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
void *arg)
{
struct rte_tailq_entry *te = NULL;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 9745bf0d..fb7052e1 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -2,6 +2,7 @@
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 6WIND S.A.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,12 +67,14 @@
#include <inttypes.h>
#include <sys/queue.h>
+#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
+#include <rte_memcpy.h>
#ifdef __cplusplus
extern "C" {
@@ -95,19 +98,19 @@ struct rte_mempool_debug_stats {
} __rte_cache_aligned;
#endif
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
/**
* A structure that stores a per-core object cache.
*/
struct rte_mempool_cache {
- unsigned len; /**< Cache len */
+ uint32_t size; /**< Size of the cache */
+ uint32_t flushthresh; /**< Threshold before we flush excess elements */
+ uint32_t len; /**< Current cache count */
/*
* Cache is allocated to this size to allow it to overflow in certain
* cases to avoid needless emptying of cache.
*/
void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
-#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/**
* A structure that stores the size of mempool elements.
@@ -126,17 +129,6 @@ struct rte_mempool_objsz {
/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT RTE_MEMPOOL_MZ_PREFIX "%s"
-#ifdef RTE_LIBRTE_XEN_DOM0
-
-/* "<name>_MP_elt" */
-#define RTE_MEMPOOL_OBJ_NAME "%s_" RTE_MEMPOOL_MZ_PREFIX "elt"
-
-#else
-
-#define RTE_MEMPOOL_OBJ_NAME RTE_MEMPOOL_MZ_FORMAT
-
-#endif /* RTE_LIBRTE_XEN_DOM0 */
-
#define MEMPOOL_PG_SHIFT_MAX (sizeof(uintptr_t) * CHAR_BIT - 1)
/** Mempool over one chunk of physically continuous memory */
@@ -152,18 +144,26 @@ struct rte_mempool_objsz {
* Mempool object header structure
*
* Each object stored in mempools are prefixed by this header structure,
- * it allows to retrieve the mempool pointer from the object. When debug
- * is enabled, a cookie is also added in this structure preventing
- * corruptions and double-frees.
+ * it allows retrieving the mempool pointer from the object and
+ * iterating over all objects attached to a mempool. When debug is
+ * enabled, a cookie is also added to this structure, preventing
+ * corruption and double-frees.
*/
struct rte_mempool_objhdr {
+ STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
struct rte_mempool *mp; /**< The mempool owning the object. */
+ phys_addr_t physaddr; /**< Physical address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
uint64_t cookie; /**< Debug cookie. */
#endif
};
/**
+ * A linked-list type for object headers
+ */
+STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
+
+/**
* Mempool object trailer structure
*
* In debug mode, each object stored in mempools are suffixed by this
@@ -176,53 +176,82 @@ struct rte_mempool_objtlr {
};
/**
+ * A linked-list type for the memory chunks where objects are stored
+ */
+STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
+
+/**
+ * Callback used to free a memory chunk
+ */
+typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
+ void *opaque);
+
+/**
+ * Mempool objects memory header structure
+ *
+ * Describes a memory chunk where objects are stored. Each chunk is
+ * virtually and physically contiguous.
+ */
+struct rte_mempool_memhdr {
+ STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
+ struct rte_mempool *mp; /**< The mempool owning the chunk */
+ void *addr; /**< Virtual address of the chunk */
+ phys_addr_t phys_addr; /**< Physical address of the chunk */
+ size_t len; /**< length of the chunk */
+ rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
+ void *opaque; /**< Argument passed to the free callback */
+};
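
The chunk list introduced here can be walked with the standard STAILQ macros, as the reworked rte_mempool_dump() above does; a hedged helper summing the memory backing a pool might look like:

#include <sys/queue.h>
#include <rte_mempool.h>

static size_t
pool_mem_bytes(const struct rte_mempool *mp)
{
        const struct rte_mempool_memhdr *memhdr;
        size_t mem_len = 0;

        /* Visit every memory chunk attached to the pool. */
        STAILQ_FOREACH(memhdr, &mp->mem_list, next)
                mem_len += memhdr->len;
        return mem_len;
}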
+
+/**
* The RTE mempool structure.
*/
struct rte_mempool {
char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
- struct rte_ring *ring; /**< Ring to store objects. */
- phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */
+ union {
+ void *pool_data; /**< Ring or pool to store objects. */
+ uint64_t pool_id; /**< External mempool identifier. */
+ };
+ void *pool_config; /**< optional args for ops alloc. */
+ const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
int flags; /**< Flags of the mempool. */
- uint32_t size; /**< Size of the mempool. */
- uint32_t cache_size; /**< Size of per-lcore local cache. */
- uint32_t cache_flushthresh;
- /**< Threshold before we flush excess elements. */
+ int socket_id; /**< Socket id passed at create. */
+ uint32_t size; /**< Max size of the mempool. */
+ uint32_t cache_size;
+ /**< Size of per-lcore default local cache. */
uint32_t elt_size; /**< Size of an element. */
uint32_t header_size; /**< Size of header (before elt). */
uint32_t trailer_size; /**< Size of trailer (after elt). */
unsigned private_data_size; /**< Size of private data. */
+ /**
+ * Index into rte_mempool_ops_table array of mempool ops
+ * structs, which contain callback function pointers.
+ * We're using an index here rather than pointers to the callbacks
+ * to facilitate any secondary processes that may want to use
+ * this mempool.
+ */
+ int32_t ops_index;
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- /** Per-lcore local cache. */
- struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
-#endif
+ struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */
+
+ uint32_t populated_size; /**< Number of populated objects. */
+ struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
+ uint32_t nb_mem_chunks; /**< Number of memory chunks */
+ struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
/** Per-lcore statistics. */
struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
#endif
-
- /* Address translation support, starts from next cache line. */
-
- /** Number of elements in the elt_pa array. */
- uint32_t pg_num __rte_cache_aligned;
- uint32_t pg_shift; /**< LOG2 of the physical pages. */
- uintptr_t pg_mask; /**< physical page mask value. */
- uintptr_t elt_va_start;
- /**< Virtual address of the first mempool object. */
- uintptr_t elt_va_end;
- /**< Virtual address of the <size + 1> mempool object. */
- phys_addr_t elt_pa[MEMPOOL_PG_NUM_DEFAULT];
- /**< Array of physical page addresses for the mempool objects buffer. */
-
} __rte_cache_aligned;
-#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread in memory. */
+#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread among memory channels. */
#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
+#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
+#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
/**
* @internal When debug is enabled, store some statistics.
@@ -251,24 +280,18 @@ struct rte_mempool {
*
* @param mp
* Pointer to the memory pool.
- * @param pgn
- * Number of pages used to store mempool objects.
- */
-#define MEMPOOL_HEADER_SIZE(mp, pgn) (sizeof(*(mp)) + \
- RTE_ALIGN_CEIL(((pgn) - RTE_DIM((mp)->elt_pa)) * \
- sizeof ((mp)->elt_pa[0]), RTE_CACHE_LINE_SIZE))
-
-/**
- * Return true if the whole mempool is in contiguous memory.
+ * @param cs
+ * Size of the per-lcore cache.
*/
-#define MEMPOOL_IS_CONTIG(mp) \
- ((mp)->pg_num == MEMPOOL_PG_NUM_DEFAULT && \
- (mp)->phys_addr == (mp)->elt_pa[0])
+#define MEMPOOL_HEADER_SIZE(mp, cs) \
+ (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
+ (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))
/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *__mempool_get_header(void *obj)
{
- return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj, sizeof(struct rte_mempool_objhdr));
+ return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
+ sizeof(struct rte_mempool_objhdr));
}
/**
@@ -307,141 +330,242 @@ static inline struct rte_mempool_objtlr *__mempool_get_trailer(void *obj)
* - 1: object is supposed to be free, mark it as allocated
* - 2: just check that cookie is valid (free or allocated)
*/
+void rte_mempool_check_cookies(const struct rte_mempool *mp,
+ void * const *obj_table_const, unsigned n, int free);
+
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
-#ifndef __INTEL_COMPILER
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif
-static inline void __mempool_check_cookies(const struct rte_mempool *mp,
- void * const *obj_table_const,
- unsigned n, int free)
-{
- struct rte_mempool_objhdr *hdr;
- struct rte_mempool_objtlr *tlr;
- uint64_t cookie;
- void *tmp;
- void *obj;
- void **obj_table;
-
- /* Force to drop the "const" attribute. This is done only when
- * DEBUG is enabled */
- tmp = (void *) obj_table_const;
- obj_table = (void **) tmp;
-
- while (n--) {
- obj = obj_table[n];
-
- if (rte_mempool_from_obj(obj) != mp)
- rte_panic("MEMPOOL: object is owned by another "
- "mempool\n");
-
- hdr = __mempool_get_header(obj);
- cookie = hdr->cookie;
-
- if (free == 0) {
- if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
- rte_log_set_history(0);
- RTE_LOG(CRIT, MEMPOOL,
- "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
- obj, (const void *) mp, cookie);
- rte_panic("MEMPOOL: bad header cookie (put)\n");
- }
- hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
- }
- else if (free == 1) {
- if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
- rte_log_set_history(0);
- RTE_LOG(CRIT, MEMPOOL,
- "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
- obj, (const void *) mp, cookie);
- rte_panic("MEMPOOL: bad header cookie (get)\n");
- }
- hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
- }
- else if (free == 2) {
- if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
- cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
- rte_log_set_history(0);
- RTE_LOG(CRIT, MEMPOOL,
- "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
- obj, (const void *) mp, cookie);
- rte_panic("MEMPOOL: bad header cookie (audit)\n");
- }
- }
- tlr = __mempool_get_trailer(obj);
- cookie = tlr->cookie;
- if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
- rte_log_set_history(0);
- RTE_LOG(CRIT, MEMPOOL,
- "obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
- obj, (const void *) mp, cookie);
- rte_panic("MEMPOOL: bad trailer cookie\n");
- }
- }
-}
-#ifndef __INTEL_COMPILER
-#pragma GCC diagnostic error "-Wcast-qual"
-#endif
+#define __mempool_check_cookies(mp, obj_table_const, n, free) \
+ rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
+
+/**
+ * Prototype for an implementation-specific data provisioning function.
+ *
+ * The function should provide the implementation-specific memory used
+ * by the other mempool ops functions in a given mempool ops struct.
+ * E.g. the default ops provides an instance of the rte_ring for this
+ * purpose; other implementations will most likely point to a different
+ * type of data structure, which remains transparent to the application
+ * programmer. This function should set mp->pool_data.
+ */
+typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
+
+/**
+ * Free the opaque private data pointed to by mp->pool_data pointer.
+ */
+typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
+
+/**
+ * Enqueue an object into the external pool.
+ */
+typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
+ void * const *obj_table, unsigned int n);
+
/**
- * A mempool object iterator callback function.
+ * Dequeue an object from the external pool.
*/
-typedef void (*rte_mempool_obj_iter_t)(void * /*obj_iter_arg*/,
- void * /*obj_start*/,
- void * /*obj_end*/,
- uint32_t /*obj_index */);
+typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
+ void **obj_table, unsigned int n);
/**
- * Call a function for each mempool object in a memory chunk
+ * Return the number of available objects in the external pool.
+ */
+typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
+
+/** Structure defining mempool operations structure */
+struct rte_mempool_ops {
+ char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
+ rte_mempool_alloc_t alloc; /**< Allocate private data. */
+ rte_mempool_free_t free; /**< Free the external pool. */
+ rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */
+ rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
+ rte_mempool_get_count get_count; /**< Get qty of available objs. */
+} __rte_cache_aligned;
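
To make the callback contract concrete, here is a hedged sketch of a ring-backed ops struct, loosely mirroring the built-in ring handler; every `sketch_*` name is hypothetical and error handling is minimal:

#include <stdio.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_mempool.h>

static int
sketch_alloc(struct rte_mempool *mp)
{
        char name[RTE_RING_NAMESIZE];

        /* The ring must hold every object; its size is a power of two. */
        snprintf(name, sizeof(name), "sketch_%s", mp->name);
        mp->pool_data = rte_ring_create(name, rte_align32pow2(mp->size + 1),
                                        mp->socket_id, 0);
        return mp->pool_data != NULL ? 0 : -rte_errno;
}

static void
sketch_free(struct rte_mempool *mp)
{
        rte_ring_free(mp->pool_data);
}

static int
sketch_enqueue(struct rte_mempool *mp, void * const *obj_table, unsigned int n)
{
        return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
}

static int
sketch_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
        return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
}

static unsigned
sketch_get_count(const struct rte_mempool *mp)
{
        return rte_ring_count(mp->pool_data);
}

static const struct rte_mempool_ops sketch_ops = {
        .name = "sketch_ring",
        .alloc = sketch_alloc,
        .free = sketch_free,
        .enqueue = sketch_enqueue,
        .dequeue = sketch_dequeue,
        .get_count = sketch_get_count,
};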
+
+#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
+
+/**
+ * Structure storing the table of registered ops structs, each of which
+ * contains the function pointers for the mempool ops functions.
+ * Each process has its own storage for this ops struct array so that
+ * the mempools can be shared across primary and secondary processes.
+ * The indices used to access the array are valid across processes, whereas
+ * any function pointers stored directly in the mempool struct would not be.
+ * Hence, the mempool struct simply stores an "ops_index".
+ */
+struct rte_mempool_ops_table {
+ rte_spinlock_t sl; /**< Spinlock for add/delete. */
+ uint32_t num_ops; /**< Number of used ops structs in the table. */
+ /**
+ * Storage for all possible ops structs.
+ */
+ struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
+} __rte_cache_aligned;
+
+/** Array of registered ops structs. */
+extern struct rte_mempool_ops_table rte_mempool_ops_table;
+
+/**
+ * @internal Get the mempool ops struct from its index.
*
- * Iterate across objects of the given size and alignment in the
- * provided chunk of memory. The given memory buffer can consist of
- * disjointed physical pages.
+ * @param ops_index
+ * The index of the ops struct in the ops struct table. It must be a valid
+ * index: (0 <= idx < num_ops).
+ * @return
+ * The pointer to the ops struct in the table.
+ */
+static inline struct rte_mempool_ops *
+rte_mempool_get_ops(int ops_index)
+{
+ RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));
+
+ return &rte_mempool_ops_table.ops[ops_index];
+}
+
+/**
+ * @internal Wrapper for mempool_ops alloc callback.
*
- * For each object, call the provided callback (if any). This function
- * is used to populate a mempool, or walk through all the elements of a
- * mempool, or estimate how many elements of the given size could be
- * created in the given memory buffer.
+ * @param mp
+ * Pointer to the memory pool.
+ * @return
+ * - 0: Success; successfully allocated mempool pool_data.
+ * - <0: Error; code of alloc function.
+ */
+int
+rte_mempool_ops_alloc(struct rte_mempool *mp);
+
+/**
+ * @internal Wrapper for mempool_ops dequeue callback.
*
- * @param vaddr
- * Virtual address of the memory buffer.
- * @param elt_num
- * Maximum number of objects to iterate through.
- * @param elt_sz
- * Size of each object.
- * @param align
- * Alignment of each object.
- * @param paddr
- * Array of physical addresses of the pages that comprises given memory
- * buffer.
- * @param pg_num
- * Number of elements in the paddr array.
- * @param pg_shift
- * LOG2 of the physical pages size.
- * @param obj_iter
- * Object iterator callback function (could be NULL).
- * @param obj_iter_arg
- * User defined parameter for the object iterator callback function.
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Number of objects to get.
+ * @return
+ * - 0: Success; got n objects.
+ * - <0: Error; code of dequeue function.
+ */
+static inline int
+rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
+ void **obj_table, unsigned n)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->dequeue(mp, obj_table, n);
+}
+
+/**
+ * @internal wrapper for mempool_ops enqueue callback.
*
+ * @param mp
+ * Pointer to the memory pool.
+ * @param obj_table
+ * Pointer to a table of void * pointers (objects).
+ * @param n
+ * Number of objects to put.
* @return
- * Number of objects iterated through.
+ * - 0: Success; n objects supplied.
+ * - <0: Error; code of enqueue function.
*/
-uint32_t rte_mempool_obj_iter(void *vaddr,
- uint32_t elt_num, size_t elt_sz, size_t align,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
- rte_mempool_obj_iter_t obj_iter, void *obj_iter_arg);
+static inline int
+rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->enqueue(mp, obj_table, n);
+}
/**
- * An object constructor callback function for mempool.
+ * @internal wrapper for mempool_ops get_count callback.
*
- * Arguments are the mempool, the opaque pointer given by the user in
- * rte_mempool_create(), the pointer to the element and the index of
- * the element in the pool.
+ * @param mp
+ * Pointer to the memory pool.
+ * @return
+ * The number of available objects in the external pool.
*/
-typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
- void *, unsigned);
+unsigned
+rte_mempool_ops_get_count(const struct rte_mempool *mp);
+
+/**
+ * @internal wrapper for mempool_ops free callback.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ */
+void
+rte_mempool_ops_free(struct rte_mempool *mp);
+
+/**
+ * Set the ops of a mempool.
+ *
+ * This can only be done on a mempool that is not populated, i.e. just after
+ * a call to rte_mempool_create_empty().
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param name
+ * Name of the ops structure to use for this mempool.
+ * @param pool_config
+ * Opaque data that can be passed by the application to the ops functions.
+ * @return
+ * - 0: Success; the mempool is now using the requested ops functions.
+ * - -EINVAL - Invalid ops struct name provided.
+ * - -EEXIST - mempool already has an ops struct assigned.
+ */
+int
+rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
+ void *pool_config);
+
+/**
+ * Register mempool operations.
+ *
+ * @param ops
+ * Pointer to an ops structure to register.
+ * @return
+ * - >=0: Success; return the index of the ops struct in the table.
+ * - -EINVAL - some missing callbacks while registering ops struct.
+ * - -ENOSPC - the maximum number of ops structs has been reached.
+ */
+int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
+
+/**
+ * Macro to statically register the ops of a mempool handler.
+ * Note that rte_mempool_register_ops() fails silently here when more
+ * than RTE_MEMPOOL_MAX_OPS_IDX ops structs are registered.
+ */
+#define MEMPOOL_REGISTER_OPS(ops) \
+ void mp_hdlr_init_##ops(void); \
+ void __attribute__((constructor, used)) mp_hdlr_init_##ops(void)\
+ { \
+ rte_mempool_register_ops(&ops); \
+ }
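
Continuing the hypothetical `sketch_ops` handler from the sketch above, registration and selection would look roughly like this:

/* Runs at load time via the constructor generated by the macro. */
MEMPOOL_REGISTER_OPS(sketch_ops);

static int
use_sketch_ops(struct rte_mempool *mp)
{
        /* Only valid on an unpopulated pool, i.e. right after
         * rte_mempool_create_empty(). */
        return rte_mempool_set_ops_byname(mp, "sketch_ring", NULL);
}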
+
+/**
+ * An object callback function for mempool.
+ *
+ * Used by rte_mempool_create() and rte_mempool_obj_iter().
+ */
+typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
+ void *opaque, void *obj, unsigned obj_idx);
+typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */
+
+/**
+ * A memory callback function for mempool.
+ *
+ * Used by rte_mempool_mem_iter().
+ */
+typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx);
/**
* A mempool constructor callback function.
@@ -522,6 +646,8 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
* - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
* when using rte_mempool_get() or rte_mempool_get_bulk() is
* "single-consumer". Otherwise, it is "multi-consumers".
+ * - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
+ * necessarily be contiguous in physical memory.
* @return
* The pointer to the new allocated mempool, on success. NULL on error
* with rte_errno set appropriately. Possible rte_errno values include:
@@ -536,14 +662,15 @@ struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags);
/**
* Create a new mempool named *name* in memory.
*
- * This function uses ``memzone_reserve()`` to allocate memory. The
- * pool contains n elements of elt_size. Its size is set to n.
+ * The pool contains n elements of elt_size. Its size is set to n.
+ * This function uses ``memzone_reserve()`` to allocate the mempool header
+ * (and the objects if vaddr is NULL).
* Depending on the input parameters, mempool elements can be either allocated
* together with the mempool header, or an externally provided memory buffer
 * could be used to store mempool objects. In the latter case, that external
@@ -558,18 +685,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* @param elt_size
* The size of each element.
* @param cache_size
- * If cache_size is non-zero, the rte_mempool library will try to
- * limit the accesses to the common lockless pool, by maintaining a
- * per-lcore object cache. This argument must be lower or equal to
- * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
- * cache_size to have "n modulo cache_size == 0": if this is
- * not the case, some elements will always stay in the pool and will
- * never be used. The access to the per-lcore table is of course
- * faster than the multi-producer/consumer pool. The cache can be
- * disabled if the cache_size argument is set to 0; it can be useful to
- * avoid losing objects in cache. Note that even if not used, the
- * memory space for cache is always reserved in a mempool structure,
- * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * Size of the cache. See rte_mempool_create() for details.
* @param private_data_size
* The size of the private data appended after the mempool
* structure. This is useful for storing some private data after the
@@ -583,35 +699,17 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* An opaque pointer to data that can be used in the mempool
* constructor function.
* @param obj_init
- * A function pointer that is called for each object at
- * initialization of the pool. The user can set some meta data in
- * objects if needed. This parameter can be NULL if not needed.
- * The obj_init() function takes the mempool pointer, the init_arg,
- * the object pointer and the object number as parameters.
+ * A function called for each object at initialization of the pool.
+ * See rte_mempool_create() for details.
* @param obj_init_arg
- * An opaque pointer to data that can be used as an argument for
- * each call to the object constructor function.
+ * An opaque pointer passed to the object constructor function.
* @param socket_id
* The *socket_id* argument is the socket identifier in the case of
* NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
* constraint for the reserved zone.
* @param flags
- * The *flags* arguments is an OR of following flags:
- * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
- * between channels in RAM: the pool allocator will add padding
- * between objects depending on the hardware configuration. See
- * Memory alignment constraints for details. If this flag is set,
- * the allocator will just align them to a cache line.
- * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
- * cache-aligned. This flag removes this constraint, and no
- * padding will be present between objects. This flag implies
- * MEMPOOL_F_NO_SPREAD.
- * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
- * when using rte_mempool_put() or rte_mempool_put_bulk() is
- * "single-producer". Otherwise, it is "multi-producers".
- * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
- * when using rte_mempool_get() or rte_mempool_get_bulk() is
- * "single-consumer". Otherwise, it is "multi-consumers".
+ * Flags controlling the behavior of the mempool. See
+ * rte_mempool_create() for details.
* @param vaddr
* Virtual address of the externally allocated memory buffer.
* Will be used to store mempool objects.
@@ -624,110 +722,219 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* LOG2 of the physical pages size.
* @return
* The pointer to the new allocated mempool, on success. NULL on error
- * with rte_errno set appropriately. Possible rte_errno values include:
- * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
- * - E_RTE_SECONDARY - function was called from a secondary process instance
- * - EINVAL - cache size provided is too large
- * - ENOSPC - the maximum number of memzones has already been allocated
- * - EEXIST - a memzone with the same name already exists
- * - ENOMEM - no appropriate memory area found in which to create memzone
+ * with rte_errno set appropriately. See rte_mempool_create() for details.
*/
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
unsigned cache_size, unsigned private_data_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+ rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags, void *vaddr,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
/**
- * Create a new mempool named *name* in memory on Xen Dom0.
+ * Create an empty mempool
*
- * This function uses ``rte_mempool_xmem_create()`` to allocate memory. The
- * pool contains n elements of elt_size. Its size is set to n.
- * All elements of the mempool are allocated together with the mempool header,
- * and memory buffer can consist of set of disjoint physical pages.
+ * The mempool is allocated and initialized, but it is not populated: no
+ * memory is allocated for the mempool elements. The user has to call
+ * rte_mempool_populate_*() or to add memory chunks to the pool. Once
+ * populated, the user may also want to initialize each object with
+ * rte_mempool_obj_iter().
*
* @param name
* The name of the mempool.
* @param n
- * The number of elements in the mempool. The optimum size (in terms of
- * memory usage) for a mempool is when n is a power of two minus one:
- * n = (2^q - 1).
+ * The maximum number of elements that can be added in the mempool.
+ * The optimum size (in terms of memory usage) for a mempool is when n
+ * is a power of two minus one: n = (2^q - 1).
* @param elt_size
* The size of each element.
* @param cache_size
- * If cache_size is non-zero, the rte_mempool library will try to
- * limit the accesses to the common lockless pool, by maintaining a
- * per-lcore object cache. This argument must be lower or equal to
- * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
- * cache_size to have "n modulo cache_size == 0": if this is
- * not the case, some elements will always stay in the pool and will
- * never be used. The access to the per-lcore table is of course
- * faster than the multi-producer/consumer pool. The cache can be
- * disabled if the cache_size argument is set to 0; it can be useful to
- * avoid losing objects in cache. Note that even if not used, the
- * memory space for cache is always reserved in a mempool structure,
- * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * Size of the cache. See rte_mempool_create() for details.
* @param private_data_size
* The size of the private data appended after the mempool
* structure. This is useful for storing some private data after the
* mempool structure, as is done for rte_mbuf_pool for example.
- * @param mp_init
- * A function pointer that is called for initialization of the pool,
- * before object initialization. The user can initialize the private
- * data in this function if needed. This parameter can be NULL if
- * not needed.
- * @param mp_init_arg
- * An opaque pointer to data that can be used in the mempool
- * constructor function.
- * @param obj_init
- * A function pointer that is called for each object at
- * initialization of the pool. The user can set some meta data in
- * objects if needed. This parameter can be NULL if not needed.
- * The obj_init() function takes the mempool pointer, the init_arg,
- * the object pointer and the object number as parameters.
- * @param obj_init_arg
- * An opaque pointer to data that can be used as an argument for
- * each call to the object constructor function.
* @param socket_id
* The *socket_id* argument is the socket identifier in the case of
* NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
* constraint for the reserved zone.
* @param flags
- * The *flags* arguments is an OR of following flags:
- * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
- * between channels in RAM: the pool allocator will add padding
- * between objects depending on the hardware configuration. See
- * Memory alignment constraints for details. If this flag is set,
- * the allocator will just align them to a cache line.
- * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
- * cache-aligned. This flag removes this constraint, and no
- * padding will be present between objects. This flag implies
- * MEMPOOL_F_NO_SPREAD.
- * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
- * when using rte_mempool_put() or rte_mempool_put_bulk() is
- * "single-producer". Otherwise, it is "multi-producers".
- * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
- * when using rte_mempool_get() or rte_mempool_get_bulk() is
- * "single-consumer". Otherwise, it is "multi-consumers".
+ * Flags controlling the behavior of the mempool. See
+ * rte_mempool_create() for details.
* @return
* The pointer to the new allocated mempool, on success. NULL on error
- * with rte_errno set appropriately. Possible rte_errno values include:
- * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
- * - E_RTE_SECONDARY - function was called from a secondary process instance
- * - EINVAL - cache size provided is too large
- * - ENOSPC - the maximum number of memzones has already been allocated
- * - EEXIST - a memzone with the same name already exists
- * - ENOMEM - no appropriate memory area found in which to create memzone
+ * with rte_errno set appropriately. See rte_mempool_create() for details.
*/
struct rte_mempool *
-rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags);
+rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
+ unsigned cache_size, unsigned private_data_size,
+ int socket_id, unsigned flags);
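
A hedged end-to-end sketch of the create/populate split described above; the pool name, the sizes, and the "ring_mp_mc" handler name are assumptions of this example:

static struct rte_mempool *
make_pool(void)
{
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("example_pool", 8191, 256,
                                      32, 0, SOCKET_ID_ANY, 0);
        if (mp == NULL)
                return NULL;

        /* Select the handler, then let the library reserve the memory. */
        if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0 ||
            rte_mempool_populate_default(mp) < 0) {
                rte_mempool_free(mp);
                return NULL;
        }
        return mp;
}

Once populated, each element can still be initialized with rte_mempool_obj_iter(), as the description above notes.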
+/**
+ * Free a mempool
+ *
+ * Unlink the mempool from the global list, then free the memory chunks
+ * and all memory referenced by the mempool. The objects must no longer
+ * be in use by other cores, as they will be freed.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ */
+void
+rte_mempool_free(struct rte_mempool *mp);
+
+/**
+ * Add physically contiguous memory for objects in the pool at init
+ *
+ * Add a virtually and physically contiguous memory chunk in the pool
+ * where objects can be instantiated.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param paddr
+ * The physical address of the memory chunk.
+ * @param len
+ * The length of memory in bytes.
+ * @param free_cb
+ * The callback used to free this chunk when destroying the mempool.
+ * @param opaque
+ * An opaque argument passed to free_cb.
+ * @return
+ * The number of objects added on success.
+ * On error, the chunk is not added in the memory list of the
+ * mempool and a negative errno is returned.
+ */
+int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+ phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
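
A hedged sketch of handing one externally allocated, physically contiguous chunk to an empty pool; `buf_va`/`buf_pa` stand for a buffer the caller obtained elsewhere (e.g. from a memzone), and the free callback is a no-op because the pool does not own that memory:

static void
ext_chunk_free(struct rte_mempool_memhdr *memhdr __rte_unused,
                void *opaque __rte_unused)
{
        /* The chunk is owned by the caller: nothing to free here. */
}

static int
add_ext_chunk(struct rte_mempool *mp, char *buf_va, phys_addr_t buf_pa,
                size_t buf_len)
{
        /* Returns the number of objects created in the chunk, or -errno. */
        return rte_mempool_populate_phys(mp, buf_va, buf_pa, buf_len,
                                         ext_chunk_free, NULL);
}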
+/**
+ * Add physical memory for objects in the pool at init
+ *
+ * Add a virtually contiguous memory chunk in the pool where objects can
+ * be instantiated. The physical addresses corresponding to the virtual
+ * area are described in paddr[], pg_num, pg_shift.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param paddr
+ * An array of physical addresses of each page composing the virtual
+ * area.
+ * @param pg_num
+ * Number of elements in the paddr array.
+ * @param pg_shift
+ * LOG2 of the physical pages size.
+ * @param free_cb
+ * The callback used to free this chunk when destroying the mempool.
+ * @param opaque
+ * An opaque argument passed to free_cb.
+ * @return
+ * The number of objects added on success.
+ * On error, the chunks are not added in the memory list of the
+ * mempool and a negative errno is returned.
+ */
+int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+ const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
+ rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
+
+/**
+ * Add virtually contiguous memory for objects in the pool at init
+ *
+ * Add a virtually contiguous memory chunk in the pool where objects can
+ * be instantiated.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param addr
+ * The virtual address of memory that should be used to store objects.
+ * Must be page-aligned.
+ * @param len
+ * The length of memory in bytes. Must be page-aligned.
+ * @param pg_sz
+ * The size of memory pages in this virtual area.
+ * @param free_cb
+ * The callback used to free this chunk when destroying the mempool.
+ * @param opaque
+ * An opaque argument passed to free_cb.
+ * @return
+ * The number of objects added on success.
+ * On error, the chunk is not added in the memory list of the
+ * mempool and a negative errno is returned.
+ */
+int
+rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
+ size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
+ void *opaque);
+
+/**
+ * Add memory for objects in the pool at init
+ *
+ * This is the default function used by rte_mempool_create() to populate
+ * the mempool. It adds memory allocated using rte_memzone_reserve().
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of objects added on success.
+ * On error, the chunk is not added in the memory list of the
+ * mempool and a negative errno is returned.
+ */
+int rte_mempool_populate_default(struct rte_mempool *mp);
+
+/**
+ * Add memory from anonymous mapping for objects in the pool at init
+ *
+ * This function mmaps an anonymous memory zone that is locked in
+ * memory to store the objects of the mempool.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of objects added on success.
+ * On error, the chunk is not added in the memory list of the
+ * mempool and a negative errno is returned.
+ */
+int rte_mempool_populate_anon(struct rte_mempool *mp);
+
+/**
+ * Call a function for each mempool element
+ *
+ * Iterate across all objects attached to a rte_mempool and call the
+ * callback function on each of them.
+ *
+ * @param mp
+ * A pointer to an initialized mempool.
+ * @param obj_cb
+ * A function pointer that is called for each object.
+ * @param obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * Number of objects iterated.
+ */
+uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
+ rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
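
For instance, zeroing every element of a freshly populated pool; `zero_obj` is an illustrative callback, not part of the API:

#include <string.h>

static void
zero_obj(struct rte_mempool *mp, void *opaque __rte_unused,
         void *obj, unsigned obj_idx __rte_unused)
{
        /* Called once per object; obj points past the internal header. */
        memset(obj, 0, mp->elt_size);
}

static uint32_t
zero_all_objs(struct rte_mempool *mp)
{
        return rte_mempool_obj_iter(mp, zero_obj, NULL);
}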
+
+/**
+ * Call a function for each mempool memory chunk
+ *
+ * Iterate across all memory chunks attached to a rte_mempool and call
+ * the callback function on each of them.
+ *
+ * @param mp
+ * A pointer to an initialized mempool.
+ * @param mem_cb
+ * A function pointer that is called for each memory chunk.
+ * @param mem_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * Number of memory chunks iterated.
+ */
+uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
+ rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);
/**
* Dump the status of the mempool to the console.
@@ -737,7 +944,71 @@ rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
* @param mp
* A pointer to the mempool structure.
*/
-void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
+void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
+
+/**
+ * Create a user-owned mempool cache.
+ *
+ * This can be used by non-EAL threads to enable caching when they
+ * interact with a mempool.
+ *
+ * @param size
+ * The size of the mempool cache. See rte_mempool_create()'s cache_size
+ * parameter description for more information. The same limits and
+ * considerations apply here too.
+ * @param socket_id
+ * The socket identifier in the case of NUMA. The value can be
+ * SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone.
+ * @return
+ * A pointer to the mempool cache, or NULL on error.
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size, int socket_id);
+
+/**
+ * Free a user-owned mempool cache.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ */
+void
+rte_mempool_cache_free(struct rte_mempool_cache *cache);
+
+/**
+ * Flush a user-owned mempool cache to the specified mempool.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ * @param mp
+ * A pointer to the mempool.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_cache_flush(struct rte_mempool_cache *cache,
+ struct rte_mempool *mp)
+{
+ rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
+ cache->len = 0;
+}
+
+/**
+ * Get a pointer to the per-lcore default mempool cache.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param lcore_id
+ * The logical core id.
+ * @return
+ * A pointer to the mempool cache or NULL if disabled or non-EAL thread.
+ */
+static inline struct rte_mempool_cache *__attribute__((always_inline))
+rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
+{
+ if (mp->cache_size == 0)
+ return NULL;
+
+ if (lcore_id >= RTE_MAX_LCORE)
+ return NULL;
+
+ return &mp->local_cache[lcore_id];
+}
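
Tying the cache API together, a hedged sketch of how a non-EAL thread might combine a user-owned cache with the generic get/put variants defined further down; the burst and cache sizes are arbitrary:

static int
cached_worker(struct rte_mempool *mp)
{
        struct rte_mempool_cache *cache;
        void *objs[32];
        int ret;

        cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
                                         SOCKET_ID_ANY);
        if (cache == NULL)
                return -1;

        ret = rte_mempool_generic_get(mp, objs, 32, cache, 0);
        if (ret == 0) {
                /* ... use the 32 objects ... */
                rte_mempool_generic_put(mp, objs, 32, cache, 0);
        }

        /* Return objects still sitting in the cache, then release it. */
        rte_mempool_cache_flush(cache, mp);
        rte_mempool_cache_free(cache);
        return ret;
}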
/**
* @internal Put several objects back in the mempool; used internally.
@@ -748,36 +1019,29 @@ void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
* @param n
* The number of objects to store back in the mempool, must be strictly
* positive.
- * @param is_mp
- * Mono-producer (0) or multi-producers (1).
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @param flags
+ * The flags used for the mempool creation.
+ * Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
*/
static inline void __attribute__((always_inline))
-__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, int is_mp)
+__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n, struct rte_mempool_cache *cache, int flags)
{
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- struct rte_mempool_cache *cache;
- uint32_t index;
void **cache_objs;
- unsigned lcore_id = rte_lcore_id();
- uint32_t cache_size = mp->cache_size;
- uint32_t flushthresh = mp->cache_flushthresh;
-#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
 /* increment stat now, adding in mempool always succeeds */
__MEMPOOL_STAT_ADD(mp, put, n);
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- /* cache is not enabled or single producer or non-EAL thread */
- if (unlikely(cache_size == 0 || is_mp == 0 ||
- lcore_id >= RTE_MAX_LCORE))
+ /* No cache provided or single producer */
+ if (unlikely(cache == NULL || flags & MEMPOOL_F_SP_PUT))
goto ring_enqueue;
/* Go straight to ring if put would overflow mem allocated for cache */
if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
goto ring_enqueue;
- cache = &mp->local_cache[lcore_id];
cache_objs = &cache->objs[cache->len];
/*
@@ -788,42 +1052,55 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
*/
/* Add elements back into the cache */
- for (index = 0; index < n; ++index, obj_table++)
- cache_objs[index] = *obj_table;
+ rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n);
cache->len += n;
- if (cache->len >= flushthresh) {
- rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
- cache->len - cache_size);
- cache->len = cache_size;
+ if (cache->len >= cache->flushthresh) {
+ rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
+ cache->len - cache->size);
+ cache->len = cache->size;
}
return;
ring_enqueue:
-#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/* push remaining objects in ring */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
- if (is_mp) {
- if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
- rte_panic("cannot put objects in mempool\n");
- }
- else {
- if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
- rte_panic("cannot put objects in mempool\n");
- }
+ if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0)
+ rte_panic("cannot put objects in mempool\n");
#else
- if (is_mp)
- rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
- else
- rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
+ rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
#endif
}
/**
+ * Put several objects back in the mempool.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add in the mempool from the obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @param flags
+ * The flags used for the mempool creation.
+ * Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n, struct rte_mempool_cache *cache, int flags)
+{
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_generic_put(mp, obj_table, n, cache, flags);
+}
+
+/**
+ * @deprecated
* Put several objects back in the mempool (multi-producers safe).
*
* @param mp
@@ -833,15 +1110,18 @@ ring_enqueue:
* @param n
* The number of objects to add in the mempool from the obj_table.
*/
+__rte_deprecated
static inline void __attribute__((always_inline))
rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- __mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_put_bulk(mp, obj_table, n, 1);
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_generic_put(mp, obj_table, n, cache, 0);
}
/**
+ * @deprecated
* Put several objects back in the mempool (NOT multi-producers safe).
*
* @param mp
@@ -851,12 +1131,12 @@ rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
* @param n
* The number of objects to add in the mempool from obj_table.
*/
-static inline void
+__rte_deprecated
+static inline void __attribute__((always_inline))
rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- __mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_put_bulk(mp, obj_table, n, 0);
+ rte_mempool_generic_put(mp, obj_table, n, NULL, MEMPOOL_F_SP_PUT);
}
/**
@@ -877,11 +1157,13 @@ static inline void __attribute__((always_inline))
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- __mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_generic_put(mp, obj_table, n, cache, mp->flags);
}
/**
+ * @deprecated
* Put one object in the mempool (multi-producers safe).
*
* @param mp
@@ -889,13 +1171,17 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
* @param obj
* A pointer to the object to be added.
*/
+__rte_deprecated
static inline void __attribute__((always_inline))
rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
{
- rte_mempool_mp_put_bulk(mp, &obj, 1);
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ rte_mempool_generic_put(mp, &obj, 1, cache, 0);
}
/**
+ * @deprecated
* Put one object back in the mempool (NOT multi-producers safe).
*
* @param mp
@@ -903,10 +1189,11 @@ rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
* @param obj
* A pointer to the object to be added.
*/
+__rte_deprecated
static inline void __attribute__((always_inline))
rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
{
- rte_mempool_sp_put_bulk(mp, &obj, 1);
+ rte_mempool_generic_put(mp, &obj, 1, NULL, MEMPOOL_F_SP_PUT);
}
/**
@@ -935,39 +1222,38 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to get, must be strictly positive.
- * @param is_mc
- * Mono-consumer (0) or multi-consumers (1).
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @param flags
+ * The flags used for the mempool creation.
+ * Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers.
* @return
* - >=0: Success; number of objects supplied.
* - <0: Error; code of ring dequeue function.
*/
static inline int __attribute__((always_inline))
-__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
- unsigned n, int is_mc)
+__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+ unsigned n, struct rte_mempool_cache *cache, int flags)
{
int ret;
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- struct rte_mempool_cache *cache;
uint32_t index, len;
void **cache_objs;
- unsigned lcore_id = rte_lcore_id();
- uint32_t cache_size = mp->cache_size;
- /* cache is not enabled or single consumer */
- if (unlikely(cache_size == 0 || is_mc == 0 ||
- n >= cache_size || lcore_id >= RTE_MAX_LCORE))
+ /* No cache provided or single consumer */
+ if (unlikely(cache == NULL || flags & MEMPOOL_F_SC_GET ||
+ n >= cache->size))
goto ring_dequeue;
- cache = &mp->local_cache[lcore_id];
cache_objs = cache->objs;
/* Can this be satisfied from the cache? */
if (cache->len < n) {
/* No. Backfill the cache first, and then fill from it */
- uint32_t req = n + (cache_size - cache->len);
+ uint32_t req = n + (cache->size - cache->len);
/* How many do we require i.e. number to fill the cache + the request */
- ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
+ ret = rte_mempool_ops_dequeue_bulk(mp,
+ &cache->objs[cache->len], req);
if (unlikely(ret < 0)) {
/*
* In the offchance that we are buffer constrained,
@@ -992,13 +1278,9 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
return 0;
ring_dequeue:
-#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/* get remaining objects from ring */
- if (is_mc)
- ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
- else
- ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
+ ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n);
if (ret < 0)
__MEMPOOL_STAT_ADD(mp, get_fail, n);
@@ -1009,7 +1291,7 @@ ring_dequeue:
}
/**
- * Get several objects from the mempool (multi-consumers safe).
+ * Get several objects from the mempool.
*
* If cache is enabled, objects will be retrieved first from cache,
* subsequently from the common pool. Note that it can return -ENOENT when
@@ -1022,21 +1304,56 @@ ring_dequeue:
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to get from mempool to obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @param flags
+ * The flags used for the mempool creation.
+ * Single-consumer (MEMPOOL_F_SC_GET flag) or multi-consumers.
* @return
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
static inline int __attribute__((always_inline))
-rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
+ struct rte_mempool_cache *cache, int flags)
{
int ret;
- ret = __mempool_get_bulk(mp, obj_table, n, 1);
+ ret = __mempool_generic_get(mp, obj_table, n, cache, flags);
if (ret == 0)
__mempool_check_cookies(mp, obj_table, n, 1);
return ret;
}
/**
+ * @deprecated
+ * Get several objects from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if the caches of
+ * other lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from mempool to obj_table.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+__rte_deprecated
+static inline int __attribute__((always_inline))
+rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ return rte_mempool_generic_get(mp, obj_table, n, cache, 0);
+}
+
+/**
+ * @deprecated
* Get several objects from the mempool (NOT multi-consumers safe).
*
* If cache is enabled, objects will be retrieved first from cache,
@@ -1055,14 +1372,12 @@ rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
* - -ENOENT: Not enough entries in the mempool; no object is
* retrieved.
*/
+__rte_deprecated
static inline int __attribute__((always_inline))
rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- int ret;
- ret = __mempool_get_bulk(mp, obj_table, n, 0);
- if (ret == 0)
- __mempool_check_cookies(mp, obj_table, n, 1);
- return ret;
+ return rte_mempool_generic_get(mp, obj_table, n, NULL,
+ MEMPOOL_F_SC_GET);
}
/**
@@ -1090,15 +1405,13 @@ rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- int ret;
- ret = __mempool_get_bulk(mp, obj_table, n,
- !(mp->flags & MEMPOOL_F_SC_GET));
- if (ret == 0)
- __mempool_check_cookies(mp, obj_table, n, 1);
- return ret;
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ return rte_mempool_generic_get(mp, obj_table, n, cache, mp->flags);
}
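An illustrative sketch of the user-owned cache flow enabled by the generic
get/put API, assuming an existing mempool "mp"; the cache size of 256 and
the batch of 32 objects are arbitrary:

    struct rte_mempool_cache *uc;
    void *objs[32];

    /* a private cache, independent of the per-lcore default */
    uc = rte_mempool_cache_create(256, SOCKET_ID_ANY);
    if (uc == NULL)
        rte_exit(EXIT_FAILURE, "cannot create user-owned cache\n");

    if (rte_mempool_generic_get(mp, objs, 32, uc, 0) == 0) {
        /* ... use the objects ... */
        rte_mempool_generic_put(mp, objs, 32, uc, 0);
    }

    /* return cached objects to the pool before freeing the cache */
    rte_mempool_cache_flush(uc, mp);
    rte_mempool_cache_free(uc);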
/**
+ * @deprecated
* Get one object from the mempool (multi-consumers safe).
*
* If cache is enabled, objects will be retrieved first from cache,
@@ -1114,13 +1427,17 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
+__rte_deprecated
static inline int __attribute__((always_inline))
rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
{
- return rte_mempool_mc_get_bulk(mp, obj_p, 1);
+ struct rte_mempool_cache *cache;
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ return rte_mempool_generic_get(mp, obj_p, 1, cache, 0);
}
/**
+ * @deprecated
* Get one object from the mempool (NOT multi-consumers safe).
*
* If cache is enabled, objects will be retrieved first from cache,
@@ -1136,10 +1453,11 @@ rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
+__rte_deprecated
static inline int __attribute__((always_inline))
rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
{
- return rte_mempool_sc_get_bulk(mp, obj_p, 1);
+ return rte_mempool_generic_get(mp, obj_p, 1, NULL, MEMPOOL_F_SC_GET);
}
/**
@@ -1173,6 +1491,21 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
*
* When cache is enabled, this function has to browse the caches of
* all lcores, so it should not be used in a data path, but only for
+ * debug purposes. User-owned mempool caches are not accounted for.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @return
+ * The number of entries in the mempool.
+ */
+unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
+
+/**
+ * @deprecated
+ * Return the number of entries in the mempool.
+ *
+ * When cache is enabled, this function has to browse the caches of
+ * all lcores, so it should not be used in a data path, but only for
* debug purposes.
*
* @param mp
@@ -1180,9 +1513,26 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
* @return
* The number of entries in the mempool.
*/
+__rte_deprecated
unsigned rte_mempool_count(const struct rte_mempool *mp);
/**
+ * Return the number of elements which have been allocated from the mempool.
+ *
+ * When cache is enabled, this function has to browse the caches of
+ * all lcores, so it should not be used in a data path, but only for
+ * debug purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   The number of elements which have been allocated (in use) in the mempool.
+ */
+unsigned int
+rte_mempool_in_use_count(const struct rte_mempool *mp);
+
+/**
+ * @deprecated
+ * Return the number of free entries in the mempool ring,
* i.e. how many entries can be freed back to the mempool.
*
@@ -1192,17 +1542,18 @@ unsigned rte_mempool_count(const struct rte_mempool *mp);
*
* When cache is enabled, this function has to browse the caches of
* all lcores, so it should not be used in a data path, but only for
- * debug purposes.
+ * debug purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
* @return
* The number of free entries in the mempool.
*/
+__rte_deprecated
static inline unsigned
rte_mempool_free_count(const struct rte_mempool *mp)
{
- return mp->size - rte_mempool_count(mp);
+ return rte_mempool_in_use_count(mp);
}
/**
@@ -1210,7 +1561,7 @@ rte_mempool_free_count(const struct rte_mempool *mp)
*
* When cache is enabled, this function has to browse the caches of all
* lcores, so it should not be used in a data path, but only for debug
- * purposes.
+ * purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
@@ -1221,7 +1572,7 @@ rte_mempool_free_count(const struct rte_mempool *mp)
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
- return !!(rte_mempool_count(mp) == mp->size);
+ return !!(rte_mempool_avail_count(mp) == mp->size);
}
/**
@@ -1229,7 +1580,7 @@ rte_mempool_full(const struct rte_mempool *mp)
*
* When cache is enabled, this function has to browse the caches of all
* lcores, so it should not be used in a data path, but only for debug
- * purposes.
+ * purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
@@ -1240,7 +1591,7 @@ rte_mempool_full(const struct rte_mempool *mp)
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
- return !!(rte_mempool_count(mp) == 0);
+ return !!(rte_mempool_avail_count(mp) == 0);
}
/**
@@ -1252,23 +1603,16 @@ rte_mempool_empty(const struct rte_mempool *mp)
* A pointer (virtual address) to the element of the pool.
* @return
* The physical address of the element pointed to by elt.
+ * If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
+ * returned value is RTE_BAD_PHYS_ADDR.
*/
static inline phys_addr_t
-rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
+rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
{
- if (rte_eal_has_hugepages()) {
- uintptr_t off;
-
- off = (const char *)elt - (const char *)mp->elt_va_start;
- return mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask);
- } else {
- /*
- * If huge pages are disabled, we cannot assume the
- * memory region to be physically contiguous.
- * Lookup for each element.
- */
- return rte_mem_virt2phy(elt);
- }
+ const struct rte_mempool_objhdr *hdr;
+ hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
+ sizeof(*hdr));
+ return hdr->physaddr;
}
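Each object now stores its physical address in its per-object header, so the
lookup is constant-time. An illustrative one-liner, assuming "elt" was
allocated from "mp" and "off" is a hypothetical byte offset into the element:

    phys_addr_t pa = rte_mempool_virt2phy(mp, elt) + off;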
/**
@@ -1281,7 +1625,7 @@ rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
* @param mp
* A pointer to the mempool structure.
*/
-void rte_mempool_audit(const struct rte_mempool *mp);
+void rte_mempool_audit(struct rte_mempool *mp);
/**
* Return a pointer to the private data in an mempool structure.
@@ -1293,7 +1637,8 @@ void rte_mempool_audit(const struct rte_mempool *mp);
*/
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
- return (char *)mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num);
+ return (char *)mp +
+ MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}
/**
@@ -1325,7 +1670,7 @@ struct rte_mempool *rte_mempool_lookup(const char *name);
* calculates header, trailer, body and total sizes of the mempool object.
*
* @param elt_size
- * The size of each element.
+ * The size of each element, without header and trailer.
* @param flags
* The flags used for the mempool creation.
* Consult rte_mempool_create() for more information about possible values.
@@ -1351,14 +1696,15 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
*
* @param elt_num
* Number of elements.
- * @param elt_sz
- * The size of each element.
+ * @param total_elt_sz
+ * The size of each element, including header and trailer, as returned
+ * by rte_mempool_calc_obj_size().
* @param pg_shift
- * LOG2 of the physical pages size.
+ * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
* @return
* Required memory size aligned at page boundary.
*/
-size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
+size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
uint32_t pg_shift);
/**
@@ -1372,8 +1718,9 @@ size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
* Will be used to store mempool objects.
* @param elt_num
* Number of elements.
- * @param elt_sz
- * The size of each element.
+ * @param total_elt_sz
+ * The size of each element, including header and trailer, as returned
+ * by rte_mempool_calc_obj_size().
* @param paddr
* Array of physical addresses of the pages that comprises given memory
* buffer.
@@ -1387,8 +1734,9 @@ size_t rte_mempool_xmem_size(uint32_t elt_num, size_t elt_sz,
* buffer is too small, return a negative value whose absolute value
* is the actual number of elements that can be stored in that buffer.
*/
-ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
+ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
+ size_t total_elt_sz, const phys_addr_t paddr[], uint32_t pg_num,
+ uint32_t pg_shift);
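These two helpers are designed to be used together: rte_mempool_calc_obj_size()
yields the per-object footprint that rte_mempool_xmem_size() expects. An
illustrative sketch with hypothetical numbers (4096 elements of 2048 bytes on
2 MB pages):

    struct rte_mempool_objsz objsz;
    uint32_t total_elt_sz;
    size_t mem_size;

    /* total size per object: header + body + trailer */
    total_elt_sz = rte_mempool_calc_obj_size(2048, 0, &objsz);
    /* bytes of page-aligned memory needed; 2^21 = 2 MB pages */
    mem_size = rte_mempool_xmem_size(4096, total_elt_sz, 21);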
/**
* Walk list of all memory pools
@@ -1398,7 +1746,7 @@ ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
* @param arg
* Argument passed to iterator
*/
-void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
+void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
void *arg);
#ifdef __cplusplus
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
new file mode 100644
index 00000000..fd0b64cf
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_mempool.h>
+#include <rte_errno.h>
+
+/* indirect jump table to support external memory pools. */
+struct rte_mempool_ops_table rte_mempool_ops_table = {
+ .sl = RTE_SPINLOCK_INITIALIZER,
+ .num_ops = 0
+};
+
+/* add a new ops struct in rte_mempool_ops_table, return its index. */
+int
+rte_mempool_register_ops(const struct rte_mempool_ops *h)
+{
+ struct rte_mempool_ops *ops;
+ int16_t ops_index;
+
+ rte_spinlock_lock(&rte_mempool_ops_table.sl);
+
+ if (rte_mempool_ops_table.num_ops >=
+ RTE_MEMPOOL_MAX_OPS_IDX) {
+ rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+ RTE_LOG(ERR, MEMPOOL,
+ "Maximum number of mempool ops structs exceeded\n");
+ return -ENOSPC;
+ }
+
+ if (h->alloc == NULL || h->enqueue == NULL ||
+ h->dequeue == NULL || h->get_count == NULL) {
+ rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+ RTE_LOG(ERR, MEMPOOL,
+ "Missing callback while registering mempool ops\n");
+ return -EINVAL;
+ }
+
+ if (strlen(h->name) >= sizeof(ops->name) - 1) {
+ rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+ RTE_LOG(ERR, MEMPOOL, "%s(): mempool_ops <%s>: name too long\n",
+ __func__, h->name);
+ rte_errno = ENAMETOOLONG;
+ return -ENAMETOOLONG;
+ }
+
+ ops_index = rte_mempool_ops_table.num_ops++;
+ ops = &rte_mempool_ops_table.ops[ops_index];
+ snprintf(ops->name, sizeof(ops->name), "%s", h->name);
+ ops->alloc = h->alloc;
+ ops->enqueue = h->enqueue;
+ ops->dequeue = h->dequeue;
+ ops->get_count = h->get_count;
+
+ rte_spinlock_unlock(&rte_mempool_ops_table.sl);
+
+ return ops_index;
+}
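An external handler plugs in by filling a struct rte_mempool_ops and
registering it, exactly as the built-in handlers do later in this patch. A
sketch in which my_ops and the my_* callbacks are hypothetical placeholders:

    /* my_alloc, my_free, my_enqueue, my_dequeue and my_get_count are
     * hypothetical callbacks implementing the handler's storage. */
    static const struct rte_mempool_ops my_ops = {
        .name = "my_handler",
        .alloc = my_alloc,
        .free = my_free,
        .enqueue = my_enqueue,
        .dequeue = my_dequeue,
        .get_count = my_get_count,
    };
    MEMPOOL_REGISTER_OPS(my_ops);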
+
+/* wrapper to allocate an external mempool's private (pool) data. */
+int
+rte_mempool_ops_alloc(struct rte_mempool *mp)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->alloc(mp);
+}
+
+/* wrapper to free an external mempool's private (pool) data. */
+void
+rte_mempool_ops_free(struct rte_mempool *mp)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ if (ops->free == NULL)
+ return;
+ ops->free(mp);
+}
+
+/* wrapper to get available objects in an external mempool. */
+unsigned int
+rte_mempool_ops_get_count(const struct rte_mempool *mp)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ return ops->get_count(mp);
+}
+
+/* sets mempool ops previously registered by rte_mempool_register_ops. */
+int
+rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
+ void *pool_config)
+{
+ struct rte_mempool_ops *ops = NULL;
+ unsigned i;
+
+ /* too late, the mempool is already populated. */
+ if (mp->flags & MEMPOOL_F_POOL_CREATED)
+ return -EEXIST;
+
+ for (i = 0; i < rte_mempool_ops_table.num_ops; i++) {
+ if (!strcmp(name,
+ rte_mempool_ops_table.ops[i].name)) {
+ ops = &rte_mempool_ops_table.ops[i];
+ break;
+ }
+ }
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ mp->ops_index = i;
+ mp->pool_config = pool_config;
+ return 0;
+}
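Because the ops can only be changed before the pool is populated, the intended
flow is create-empty, select the ops by name, then populate. A sketch with
illustrative sizes:

    struct rte_mempool *mp;

    mp = rte_mempool_create_empty("example_pool", 8192, 2048, 256, 0,
                                  SOCKET_ID_ANY, 0);
    if (mp == NULL)
        rte_exit(EXIT_FAILURE, "cannot create empty mempool\n");
    if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0)
        rte_exit(EXIT_FAILURE, "cannot set mempool ops\n");
    if (rte_mempool_populate_default(mp) < 0)
        rte_exit(EXIT_FAILURE, "cannot populate mempool\n");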
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/lib/librte_mempool/rte_mempool_ring.c
new file mode 100644
index 00000000..b9aa64dd
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_ring.c
@@ -0,0 +1,161 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+static int
+common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
+}
+
+static int
+common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
+}
+
+static int
+common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
+}
+
+static int
+common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+ return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
+}
+
+static unsigned
+common_ring_get_count(const struct rte_mempool *mp)
+{
+ return rte_ring_count(mp->pool_data);
+}
+
+
+static int
+common_ring_alloc(struct rte_mempool *mp)
+{
+ int rg_flags = 0, ret;
+ char rg_name[RTE_RING_NAMESIZE];
+ struct rte_ring *r;
+
+ ret = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT, mp->name);
+ if (ret < 0 || ret >= (int)sizeof(rg_name)) {
+ rte_errno = ENAMETOOLONG;
+ return -rte_errno;
+ }
+
+ /* ring flags */
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ /*
+ * Allocate the ring that will be used to store objects.
+ * Ring functions will return appropriate errors if we are
+ * running as a secondary process etc., so no checks made
+ * in this function for that condition.
+ */
+ r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id, rg_flags);
+ if (r == NULL)
+ return -rte_errno;
+
+ mp->pool_data = r;
+
+ return 0;
+}
+
+static void
+common_ring_free(struct rte_mempool *mp)
+{
+ rte_ring_free(mp->pool_data);
+}
+
+/*
+ * The following four mempool ops structs provide the backward-compatible
+ * handlers for the single/multi producer and single/multi consumer
+ * combinations dictated by the flags passed to rte_mempool_create().
+ */
+static const struct rte_mempool_ops ops_mp_mc = {
+ .name = "ring_mp_mc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_mp_enqueue,
+ .dequeue = common_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_sp_sc = {
+ .name = "ring_sp_sc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_sp_enqueue,
+ .dequeue = common_ring_sc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_mp_sc = {
+ .name = "ring_mp_sc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_mp_enqueue,
+ .dequeue = common_ring_sc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+static const struct rte_mempool_ops ops_sp_mc = {
+ .name = "ring_sp_mc",
+ .alloc = common_ring_alloc,
+ .free = common_ring_free,
+ .enqueue = common_ring_sp_enqueue,
+ .dequeue = common_ring_mc_dequeue,
+ .get_count = common_ring_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(ops_mp_mc);
+MEMPOOL_REGISTER_OPS(ops_sp_sc);
+MEMPOOL_REGISTER_OPS(ops_mp_sc);
+MEMPOOL_REGISTER_OPS(ops_sp_mc);
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 00000000..5fd8af24
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,147 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+struct rte_mempool_stack {
+ rte_spinlock_t sl;
+
+ uint32_t size;
+ uint32_t len;
+ void *objs[];
+};
+
+static int
+stack_alloc(struct rte_mempool *mp)
+{
+ struct rte_mempool_stack *s;
+ unsigned n = mp->size;
+ int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+ /* Allocate our local memory structure */
+ s = rte_zmalloc_socket("mempool-stack",
+ size,
+ RTE_CACHE_LINE_SIZE,
+ mp->socket_id);
+ if (s == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+ return -ENOMEM;
+ }
+
+ rte_spinlock_init(&s->sl);
+
+ s->size = n;
+ mp->pool_data = s;
+
+ return 0;
+}
+
+static int
+stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n)
+{
+ struct rte_mempool_stack *s = mp->pool_data;
+ void **cache_objs;
+ unsigned index;
+
+ rte_spinlock_lock(&s->sl);
+ cache_objs = &s->objs[s->len];
+
+ /* Is there sufficient space in the stack? */
+ if ((s->len + n) > s->size) {
+ rte_spinlock_unlock(&s->sl);
+ return -ENOBUFS;
+ }
+
+ /* Add the objects onto the stack */
+ for (index = 0; index < n; ++index, obj_table++)
+ cache_objs[index] = *obj_table;
+
+ s->len += n;
+
+ rte_spinlock_unlock(&s->sl);
+ return 0;
+}
+
+static int
+stack_dequeue(struct rte_mempool *mp, void **obj_table,
+ unsigned n)
+{
+ struct rte_mempool_stack *s = mp->pool_data;
+ void **cache_objs;
+ unsigned index, len;
+
+ rte_spinlock_lock(&s->sl);
+
+ if (unlikely(n > s->len)) {
+ rte_spinlock_unlock(&s->sl);
+ return -ENOENT;
+ }
+
+ cache_objs = s->objs;
+
+ for (index = 0, len = s->len - 1; index < n;
+ ++index, len--, obj_table++)
+ *obj_table = cache_objs[len];
+
+ s->len -= n;
+ rte_spinlock_unlock(&s->sl);
+ return n;
+}
+
+static unsigned
+stack_get_count(const struct rte_mempool *mp)
+{
+ struct rte_mempool_stack *s = mp->pool_data;
+
+ return s->len;
+}
+
+static void
+stack_free(struct rte_mempool *mp)
+{
+ rte_free((void *)(mp->pool_data));
+}
+
+static struct rte_mempool_ops ops_stack = {
+ .name = "stack",
+ .alloc = stack_alloc,
+ .free = stack_free,
+ .enqueue = stack_enqueue,
+ .dequeue = stack_dequeue,
+ .get_count = stack_get_count
+};
+
+MEMPOOL_REGISTER_OPS(ops_stack);
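Once registered, the stack handler is selected like any other, by name, on an
unpopulated pool ("mp" here is assumed to come from
rte_mempool_create_empty()); its LIFO order tends to hand back cache-warm
objects first:

    rte_mempool_set_ops_byname(mp, "stack", NULL);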
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 17151e08..dee1c990 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -1,7 +1,6 @@
DPDK_2.0 {
global:
- rte_dom0_mempool_create;
rte_mempool_audit;
rte_mempool_calc_obj_size;
rte_mempool_count;
@@ -9,7 +8,6 @@ DPDK_2.0 {
rte_mempool_dump;
rte_mempool_list_dump;
rte_mempool_lookup;
- rte_mempool_obj_iter;
rte_mempool_walk;
rte_mempool_xmem_create;
rte_mempool_xmem_size;
@@ -17,3 +15,30 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_16.07 {
+ global:
+
+ rte_mempool_avail_count;
+ rte_mempool_cache_create;
+ rte_mempool_cache_flush;
+ rte_mempool_cache_free;
+ rte_mempool_check_cookies;
+ rte_mempool_create_empty;
+ rte_mempool_default_cache;
+ rte_mempool_free;
+ rte_mempool_generic_get;
+ rte_mempool_generic_put;
+ rte_mempool_in_use_count;
+ rte_mempool_mem_iter;
+ rte_mempool_obj_iter;
+ rte_mempool_ops_table;
+ rte_mempool_populate_anon;
+ rte_mempool_populate_default;
+ rte_mempool_populate_phys;
+ rte_mempool_populate_phys_tab;
+ rte_mempool_populate_virt;
+ rte_mempool_register_ops;
+ rte_mempool_set_ops_byname;
+
+} DPDK_2.0;
diff --git a/lib/librte_pdump/Makefile b/lib/librte_pdump/Makefile
new file mode 100644
index 00000000..166441a2
--- /dev/null
+++ b/lib/librte_pdump/Makefile
@@ -0,0 +1,57 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pdump.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+CFLAGS += -D_GNU_SOURCE
+LDLIBS += -lpthread
+
+EXPORT_MAP := rte_pdump_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_PDUMP) := rte_pdump.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_PDUMP)-include := rte_pdump.h
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += lib/librte_ether
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
new file mode 100644
index 00000000..ee566cb2
--- /dev/null
+++ b/lib/librte_pdump/rte_pdump.c
@@ -0,0 +1,959 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+
+#include <rte_memcpy.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_errno.h>
+#include <rte_pci.h>
+
+#include "rte_pdump.h"
+
+#define SOCKET_PATH_VAR_RUN "/var/run"
+#define SOCKET_PATH_HOME "HOME"
+#define DPDK_DIR "/.dpdk"
+#define SOCKET_DIR "/pdump_sockets"
+#define SERVER_SOCKET "%s/pdump_server_socket"
+#define CLIENT_SOCKET "%s/pdump_client_socket_%d_%u"
+#define DEVICE_ID_SIZE 64
+/* Macros for printing using RTE_LOG */
+#define RTE_LOGTYPE_PDUMP RTE_LOGTYPE_USER1
+
+enum pdump_operation {
+ DISABLE = 1,
+ ENABLE = 2
+};
+
+enum pdump_version {
+ V1 = 1
+};
+
+static pthread_t pdump_thread;
+static int pdump_socket_fd;
+static char server_socket_dir[PATH_MAX];
+static char client_socket_dir[PATH_MAX];
+
+struct pdump_request {
+ uint16_t ver;
+ uint16_t op;
+ uint32_t flags;
+ union pdump_data {
+ struct enable_v1 {
+ char device[DEVICE_ID_SIZE];
+ uint16_t queue;
+ struct rte_ring *ring;
+ struct rte_mempool *mp;
+ void *filter;
+ } en_v1;
+ struct disable_v1 {
+ char device[DEVICE_ID_SIZE];
+ uint16_t queue;
+ struct rte_ring *ring;
+ struct rte_mempool *mp;
+ void *filter;
+ } dis_v1;
+ } data;
+};
+
+struct pdump_response {
+ uint16_t ver;
+ uint16_t res_op;
+ int32_t err_value;
+};
+
+static struct pdump_rxtx_cbs {
+ struct rte_ring *ring;
+ struct rte_mempool *mp;
+ struct rte_eth_rxtx_callback *cb;
+ void *filter;
+} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
+tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
+
+static inline int
+pdump_pktmbuf_copy_data(struct rte_mbuf *seg, const struct rte_mbuf *m)
+{
+ if (rte_pktmbuf_tailroom(seg) < m->data_len) {
+ RTE_LOG(ERR, PDUMP,
+ "User mempool: insufficient data_len of mbuf\n");
+ return -EINVAL;
+ }
+
+ seg->port = m->port;
+ seg->vlan_tci = m->vlan_tci;
+ seg->hash = m->hash;
+ seg->tx_offload = m->tx_offload;
+ seg->ol_flags = m->ol_flags;
+ seg->packet_type = m->packet_type;
+ seg->vlan_tci_outer = m->vlan_tci_outer;
+ seg->data_len = m->data_len;
+ seg->pkt_len = seg->data_len;
+ rte_memcpy(rte_pktmbuf_mtod(seg, void *),
+ rte_pktmbuf_mtod(m, void *),
+ rte_pktmbuf_data_len(seg));
+
+ return 0;
+}
+
+static inline struct rte_mbuf *
+pdump_pktmbuf_copy(struct rte_mbuf *m, struct rte_mempool *mp)
+{
+ struct rte_mbuf *m_dup, *seg, **prev;
+ uint32_t pktlen;
+ uint8_t nseg;
+
+ m_dup = rte_pktmbuf_alloc(mp);
+ if (unlikely(m_dup == NULL))
+ return NULL;
+
+ seg = m_dup;
+ prev = &seg->next;
+ pktlen = m->pkt_len;
+ nseg = 0;
+
+ do {
+ nseg++;
+ if (pdump_pktmbuf_copy_data(seg, m) < 0) {
+ rte_pktmbuf_free(m_dup);
+ return NULL;
+ }
+ *prev = seg;
+ prev = &seg->next;
+ } while ((m = m->next) != NULL &&
+ (seg = rte_pktmbuf_alloc(mp)) != NULL);
+
+ *prev = NULL;
+ m_dup->nb_segs = nseg;
+ m_dup->pkt_len = pktlen;
+
+ /* Allocation of a new segment failed mid-copy */
+ if (unlikely(seg == NULL)) {
+ rte_pktmbuf_free(m_dup);
+ return NULL;
+ }
+
+ __rte_mbuf_sanity_check(m_dup, 1);
+ return m_dup;
+}
+
+static inline void
+pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
+{
+ unsigned i;
+ int ring_enq;
+ uint16_t d_pkts = 0;
+ struct rte_mbuf *dup_bufs[nb_pkts];
+ struct pdump_rxtx_cbs *cbs;
+ struct rte_ring *ring;
+ struct rte_mempool *mp;
+ struct rte_mbuf *p;
+
+ cbs = user_params;
+ ring = cbs->ring;
+ mp = cbs->mp;
+ for (i = 0; i < nb_pkts; i++) {
+ p = pdump_pktmbuf_copy(pkts[i], mp);
+ if (p)
+ dup_bufs[d_pkts++] = p;
+ }
+
+ ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts);
+ if (unlikely(ring_enq < d_pkts)) {
+ RTE_LOG(DEBUG, PDUMP,
+ "only %d of packets enqueued to ring\n", ring_enq);
+ do {
+ rte_pktmbuf_free(dup_bufs[ring_enq]);
+ } while (++ring_enq < d_pkts);
+ }
+}
+
+static uint16_t
+pdump_rx(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+ struct rte_mbuf **pkts, uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_params)
+{
+ pdump_copy(pkts, nb_pkts, user_params);
+ return nb_pkts;
+}
+
+static uint16_t
+pdump_tx(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+ struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
+{
+ pdump_copy(pkts, nb_pkts, user_params);
+ return nb_pkts;
+}
+
+static int
+pdump_get_dombdf(char *device_id, char *domBDF, size_t len)
+{
+ int ret;
+ struct rte_pci_addr dev_addr = {0};
+
+ /* identify if device_id is pci address or name */
+ ret = eal_parse_pci_DomBDF(device_id, &dev_addr);
+ if (ret < 0)
+ return -1;
+
+ if (dev_addr.domain)
+ ret = snprintf(domBDF, len, "%u:%u:%u.%u", dev_addr.domain,
+ dev_addr.bus, dev_addr.devid,
+ dev_addr.function);
+ else
+ ret = snprintf(domBDF, len, "%u:%u.%u", dev_addr.bus,
+ dev_addr.devid,
+ dev_addr.function);
+
+ return ret;
+}
+
+static int
+pdump_register_rx_callbacks(uint16_t end_q, uint8_t port, uint16_t queue,
+ struct rte_ring *ring, struct rte_mempool *mp,
+ uint16_t operation)
+{
+ uint16_t qid;
+ struct pdump_rxtx_cbs *cbs = NULL;
+
+ qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
+ for (; qid < end_q; qid++) {
+ cbs = &rx_cbs[port][qid];
+ if (cbs && operation == ENABLE) {
+ if (cbs->cb) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to add rx callback for port=%d "
+ "and queue=%d, callback already exists\n",
+ port, qid);
+ return -EEXIST;
+ }
+ cbs->ring = ring;
+ cbs->mp = mp;
+ cbs->cb = rte_eth_add_first_rx_callback(port, qid,
+ pdump_rx, cbs);
+ if (cbs->cb == NULL) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to add rx callback, errno=%d\n",
+ rte_errno);
+ return rte_errno;
+ }
+ }
+ if (cbs && operation == DISABLE) {
+ int ret;
+
+ if (cbs->cb == NULL) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to delete non existing rx "
+ "callback for port=%d and queue=%d\n",
+ port, qid);
+ return -EINVAL;
+ }
+ ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
+ if (ret < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to remove rx callback, errno=%d\n",
+ rte_errno);
+ return ret;
+ }
+ cbs->cb = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+pdump_register_tx_callbacks(uint16_t end_q, uint8_t port, uint16_t queue,
+ struct rte_ring *ring, struct rte_mempool *mp,
+ uint16_t operation)
+{
+
+ uint16_t qid;
+ struct pdump_rxtx_cbs *cbs = NULL;
+
+ qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
+ for (; qid < end_q; qid++) {
+ cbs = &tx_cbs[port][qid];
+ if (cbs && operation == ENABLE) {
+ if (cbs->cb) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to add tx callback for port=%d "
+ "and queue=%d, callback already exists\n",
+ port, qid);
+ return -EEXIST;
+ }
+ cbs->ring = ring;
+ cbs->mp = mp;
+ cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
+ cbs);
+ if (cbs->cb == NULL) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to add tx callback, errno=%d\n",
+ rte_errno);
+ return rte_errno;
+ }
+ }
+ if (cbs && operation == DISABLE) {
+ int ret;
+
+ if (cbs->cb == NULL) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to delete non existing tx "
+ "callback for port=%d and queue=%d\n",
+ port, qid);
+ return -EINVAL;
+ }
+ ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
+ if (ret < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to remove tx callback, errno=%d\n",
+ rte_errno);
+ return ret;
+ }
+ cbs->cb = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+set_pdump_rxtx_cbs(struct pdump_request *p)
+{
+ uint16_t nb_rx_q, nb_tx_q = 0, end_q, queue;
+ uint8_t port;
+ int ret = 0;
+ uint32_t flags;
+ uint16_t operation;
+ struct rte_ring *ring;
+ struct rte_mempool *mp;
+
+ flags = p->flags;
+ operation = p->op;
+ if (operation == ENABLE) {
+ ret = rte_eth_dev_get_port_by_name(p->data.en_v1.device,
+ &port);
+ if (ret < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to get potid for device id=%s\n",
+ p->data.en_v1.device);
+ return -EINVAL;
+ }
+ queue = p->data.en_v1.queue;
+ ring = p->data.en_v1.ring;
+ mp = p->data.en_v1.mp;
+ } else {
+ ret = rte_eth_dev_get_port_by_name(p->data.dis_v1.device,
+ &port);
+ if (ret < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to get potid for device id=%s\n",
+ p->data.dis_v1.device);
+ return -EINVAL;
+ }
+ queue = p->data.dis_v1.queue;
+ ring = p->data.dis_v1.ring;
+ mp = p->data.dis_v1.mp;
+ }
+
+ /* validate queue counts when capture is requested on all queues */
+ if (queue == RTE_PDUMP_ALL_QUEUES) {
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(port, &dev_info);
+ nb_rx_q = dev_info.nb_rx_queues;
+ nb_tx_q = dev_info.nb_tx_queues;
+ if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
+ RTE_LOG(ERR, PDUMP,
+ "number of rx queues cannot be 0\n");
+ return -EINVAL;
+ }
+ if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
+ RTE_LOG(ERR, PDUMP,
+ "number of tx queues cannot be 0\n");
+ return -EINVAL;
+ }
+ if ((nb_tx_q == 0 || nb_rx_q == 0) &&
+ flags == RTE_PDUMP_FLAG_RXTX) {
+ RTE_LOG(ERR, PDUMP,
+ "both tx&rx queues must be non zero\n");
+ return -EINVAL;
+ }
+ }
+
+ /* register RX callback */
+ if (flags & RTE_PDUMP_FLAG_RX) {
+ end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
+ ret = pdump_register_rx_callbacks(end_q, port, queue, ring, mp,
+ operation);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* register TX callback */
+ if (flags & RTE_PDUMP_FLAG_TX) {
+ end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
+ ret = pdump_register_tx_callbacks(end_q, port, queue, ring, mp,
+ operation);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+/* get socket path (/var/run if root, $HOME otherwise) */
+static int
+pdump_get_socket_path(char *buffer, int bufsz, enum rte_pdump_socktype type)
+{
+ char dpdk_dir[PATH_MAX] = {0};
+ char dir[PATH_MAX] = {0};
+ char *dir_home = NULL;
+
+ if (type == RTE_PDUMP_SOCKET_SERVER && server_socket_dir[0] != 0)
+ snprintf(dir, sizeof(dir), "%s", server_socket_dir);
+ else if (type == RTE_PDUMP_SOCKET_CLIENT && client_socket_dir[0] != 0)
+ snprintf(dir, sizeof(dir), "%s", client_socket_dir);
+ else {
+ if (getuid() != 0) {
+ dir_home = getenv(SOCKET_PATH_HOME);
+ if (!dir_home) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to get environment variable"
+ " value for %s, %s:%d\n",
+ SOCKET_PATH_HOME, __func__, __LINE__);
+ return -1;
+ }
+ snprintf(dpdk_dir, sizeof(dpdk_dir), "%s%s",
+ dir_home, DPDK_DIR);
+ } else
+ snprintf(dpdk_dir, sizeof(dpdk_dir), "%s%s",
+ SOCKET_PATH_VAR_RUN, DPDK_DIR);
+
+ mkdir(dpdk_dir, 0700);
+ snprintf(dir, sizeof(dir), "%s%s",
+ dpdk_dir, SOCKET_DIR);
+ }
+
+ mkdir(dir, 0700);
+ if (type == RTE_PDUMP_SOCKET_SERVER)
+ snprintf(buffer, bufsz, SERVER_SOCKET, dir);
+ else
+ snprintf(buffer, bufsz, CLIENT_SOCKET, dir, getpid(),
+ rte_sys_gettid());
+
+ return 0;
+}
+
+static int
+pdump_create_server_socket(void)
+{
+ int ret, socket_fd;
+ struct sockaddr_un addr;
+ socklen_t addr_len;
+
+ ret = pdump_get_socket_path(addr.sun_path, sizeof(addr.sun_path),
+ RTE_PDUMP_SOCKET_SERVER);
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to get server socket path: %s:%d\n",
+ __func__, __LINE__);
+ return -1;
+ }
+ addr.sun_family = AF_UNIX;
+
+ /* remove if file already exists */
+ unlink(addr.sun_path);
+
+ /* set up a server socket */
+ socket_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+ if (socket_fd < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to create server socket: %s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ return -1;
+ }
+
+ addr_len = sizeof(struct sockaddr_un);
+ ret = bind(socket_fd, (struct sockaddr *) &addr, addr_len);
+ if (ret) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to bind to server socket: %s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ close(socket_fd);
+ return -1;
+ }
+
+ /* save the socket in local configuration */
+ pdump_socket_fd = socket_fd;
+
+ return 0;
+}
+
+static __attribute__((noreturn)) void *
+pdump_thread_main(__rte_unused void *arg)
+{
+ struct sockaddr_un cli_addr;
+ socklen_t cli_len;
+ struct pdump_request cli_req;
+ struct pdump_response resp;
+ int n;
+ int ret = 0;
+
+ /* host thread, never break out */
+ for (;;) {
+ /* recv client requests */
+ cli_len = sizeof(cli_addr);
+ n = recvfrom(pdump_socket_fd, &cli_req,
+ sizeof(struct pdump_request), 0,
+ (struct sockaddr *)&cli_addr, &cli_len);
+ if (n < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to recv from client:%s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ continue;
+ }
+
+ ret = set_pdump_rxtx_cbs(&cli_req);
+
+ resp.ver = cli_req.ver;
+ resp.res_op = cli_req.op;
+ resp.err_value = ret;
+ n = sendto(pdump_socket_fd, &resp,
+ sizeof(struct pdump_response),
+ 0, (struct sockaddr *)&cli_addr, cli_len);
+ if (n < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to send to client:%s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ }
+ }
+}
+
+int
+rte_pdump_init(const char *path)
+{
+ int ret = 0;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
+
+ ret = rte_pdump_set_socket_dir(path, RTE_PDUMP_SOCKET_SERVER);
+ if (ret != 0)
+ return -1;
+
+ ret = pdump_create_server_socket();
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to create server socket:%s:%d\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ /* create the host thread to wait/handle pdump requests */
+ ret = pthread_create(&pdump_thread, NULL, pdump_thread_main, NULL);
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to create the pdump thread:%s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ return -1;
+ }
+ /* Set thread_name for aid in debugging. */
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "pdump-thread");
+ ret = rte_thread_setname(pdump_thread, thread_name);
+ if (ret != 0) {
+ RTE_LOG(DEBUG, PDUMP,
+ "Failed to set thread name for pdump handling\n");
+ }
+
+ return 0;
+}
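On the capture server (the primary process), the whole setup reduces to one
call after EAL initialization; a sketch using the default socket directory:

    if (rte_pdump_init(NULL) < 0)
        rte_exit(EXIT_FAILURE, "pdump server init failed\n");
    /* ... run the datapath; call rte_pdump_uninit() at shutdown ... */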
+
+int
+rte_pdump_uninit(void)
+{
+ int ret;
+
+ ret = pthread_cancel(pdump_thread);
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to cancel the pdump thread:%s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ return -1;
+ }
+
+ ret = close(pdump_socket_fd);
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to close server socket: %s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ return -1;
+ }
+
+ struct sockaddr_un addr;
+
+ ret = pdump_get_socket_path(addr.sun_path, sizeof(addr.sun_path),
+ RTE_PDUMP_SOCKET_SERVER);
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to get server socket path: %s:%d\n",
+ __func__, __LINE__);
+ return -1;
+ }
+ ret = unlink(addr.sun_path);
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to remove server socket addr: %s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pdump_create_client_socket(struct pdump_request *p)
+{
+ int ret, socket_fd;
+ int pid;
+ int n;
+ struct pdump_response server_resp;
+ struct sockaddr_un addr, serv_addr, from;
+ socklen_t addr_len, serv_len;
+
+ pid = getpid();
+
+ socket_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+ if (socket_fd < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "client socket(): %s:pid(%d):tid(%u), %s:%d\n",
+ strerror(errno), pid, rte_sys_gettid(),
+ __func__, __LINE__);
+ ret = errno;
+ return ret;
+ }
+
+ ret = pdump_get_socket_path(addr.sun_path, sizeof(addr.sun_path),
+ RTE_PDUMP_SOCKET_CLIENT);
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to get client socket path: %s:%d\n",
+ __func__, __LINE__);
+ return -1;
+ }
+ addr.sun_family = AF_UNIX;
+ addr_len = sizeof(struct sockaddr_un);
+
+ do {
+ ret = bind(socket_fd, (struct sockaddr *) &addr, addr_len);
+ if (ret) {
+ RTE_LOG(ERR, PDUMP,
+ "client bind(): %s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ ret = errno;
+ break;
+ }
+
+ serv_len = sizeof(struct sockaddr_un);
+ memset(&serv_addr, 0, sizeof(serv_addr));
+ ret = pdump_get_socket_path(serv_addr.sun_path,
+ sizeof(serv_addr.sun_path),
+ RTE_PDUMP_SOCKET_SERVER);
+ if (ret != 0) {
+ RTE_LOG(ERR, PDUMP,
+ "Failed to get server socket path: %s:%d\n",
+ __func__, __LINE__);
+ break;
+ }
+ serv_addr.sun_family = AF_UNIX;
+
+ n = sendto(socket_fd, p, sizeof(struct pdump_request), 0,
+ (struct sockaddr *)&serv_addr, serv_len);
+ if (n < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to send to server:%s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ ret = errno;
+ break;
+ }
+
+ n = recvfrom(socket_fd, &server_resp,
+ sizeof(struct pdump_response), 0,
+ (struct sockaddr *)&from, &serv_len);
+ if (n < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "failed to recv from server:%s, %s:%d\n",
+ strerror(errno), __func__, __LINE__);
+ ret = errno;
+ break;
+ }
+ ret = server_resp.err_value;
+ } while (0);
+
+ close(socket_fd);
+ unlink(addr.sun_path);
+ return ret;
+}
+
+static int
+pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
+{
+ if (ring == NULL || mp == NULL) {
+ RTE_LOG(ERR, PDUMP, "NULL ring or mempool are passed %s:%d\n",
+ __func__, __LINE__);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ if (mp->flags & MEMPOOL_F_SP_PUT || mp->flags & MEMPOOL_F_SC_GET) {
+ RTE_LOG(ERR, PDUMP, "mempool with either SP or SC settings"
+ " is not valid for pdump, should have MP and MC settings\n");
+ rte_errno = EINVAL;
+ return -1;
+ }
+ if (ring->prod.sp_enqueue || ring->cons.sc_dequeue) {
+ RTE_LOG(ERR, PDUMP, "ring with either SP or SC settings"
+ " is not valid for pdump, should have MP and MC settings\n");
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pdump_validate_flags(uint32_t flags)
+{
+ if (flags != RTE_PDUMP_FLAG_RX && flags != RTE_PDUMP_FLAG_TX &&
+ flags != RTE_PDUMP_FLAG_RXTX) {
+ RTE_LOG(ERR, PDUMP,
+ "invalid flags, should be either rx/tx/rxtx\n");
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pdump_validate_port(uint8_t port, char *name)
+{
+ int ret = 0;
+
+ if (port >= RTE_MAX_ETHPORTS) {
+ RTE_LOG(ERR, PDUMP, "Invalid port id %u, %s:%d\n", port,
+ __func__, __LINE__);
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ ret = rte_eth_dev_get_name_by_port(port, name);
+ if (ret < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "port id to name mapping failed for port id=%u, %s:%d\n",
+ port, __func__, __LINE__);
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+pdump_prepare_client_request(char *device, uint16_t queue,
+ uint32_t flags,
+ uint16_t operation,
+ struct rte_ring *ring,
+ struct rte_mempool *mp,
+ void *filter)
+{
+ int ret;
+ struct pdump_request req = {.ver = 1,};
+
+ req.flags = flags;
+ req.op = operation;
+ if ((operation & ENABLE) != 0) {
+ snprintf(req.data.en_v1.device, sizeof(req.data.en_v1.device),
+ "%s", device);
+ req.data.en_v1.queue = queue;
+ req.data.en_v1.ring = ring;
+ req.data.en_v1.mp = mp;
+ req.data.en_v1.filter = filter;
+ } else {
+ snprintf(req.data.dis_v1.device, sizeof(req.data.dis_v1.device),
+ "%s", device);
+ req.data.dis_v1.queue = queue;
+ req.data.dis_v1.ring = NULL;
+ req.data.dis_v1.mp = NULL;
+ req.data.dis_v1.filter = NULL;
+ }
+
+ ret = pdump_create_client_socket(&req);
+ if (ret < 0) {
+ RTE_LOG(ERR, PDUMP,
+ "client request for pdump enable/disable failed\n");
+ rte_errno = ret;
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_pdump_enable(uint8_t port, uint16_t queue, uint32_t flags,
+ struct rte_ring *ring,
+ struct rte_mempool *mp,
+ void *filter)
+{
+
+ int ret = 0;
+ char name[DEVICE_ID_SIZE];
+
+ ret = pdump_validate_port(port, name);
+ if (ret < 0)
+ return ret;
+ ret = pdump_validate_ring_mp(ring, mp);
+ if (ret < 0)
+ return ret;
+ ret = pdump_validate_flags(flags);
+ if (ret < 0)
+ return ret;
+
+ ret = pdump_prepare_client_request(name, queue, flags,
+ ENABLE, ring, mp, filter);
+
+ return ret;
+}
+
+int
+rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
+ uint32_t flags,
+ struct rte_ring *ring,
+ struct rte_mempool *mp,
+ void *filter)
+{
+ int ret = 0;
+ char domBDF[DEVICE_ID_SIZE];
+
+ ret = pdump_validate_ring_mp(ring, mp);
+ if (ret < 0)
+ return ret;
+ ret = pdump_validate_flags(flags);
+ if (ret < 0)
+ return ret;
+
+ if (pdump_get_dombdf(device_id, domBDF, sizeof(domBDF)) > 0)
+ ret = pdump_prepare_client_request(domBDF, queue, flags,
+ ENABLE, ring, mp, filter);
+ else
+ ret = pdump_prepare_client_request(device_id, queue, flags,
+ ENABLE, ring, mp, filter);
+
+ return ret;
+}
+
+int
+rte_pdump_disable(uint8_t port, uint16_t queue, uint32_t flags)
+{
+ int ret = 0;
+ char name[DEVICE_ID_SIZE];
+
+ ret = pdump_validate_port(port, name);
+ if (ret < 0)
+ return ret;
+ ret = pdump_validate_flags(flags);
+ if (ret < 0)
+ return ret;
+
+ ret = pdump_prepare_client_request(name, queue, flags,
+ DISABLE, NULL, NULL, NULL);
+
+ return ret;
+}
+
+int
+rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
+ uint32_t flags)
+{
+ int ret = 0;
+ char domBDF[DEVICE_ID_SIZE];
+
+ ret = pdump_validate_flags(flags);
+ if (ret < 0)
+ return ret;
+
+ if (pdump_get_dombdf(device_id, domBDF, sizeof(domBDF)) > 0)
+ ret = pdump_prepare_client_request(domBDF, queue, flags,
+ DISABLE, NULL, NULL, NULL);
+ else
+ ret = pdump_prepare_client_request(device_id, queue, flags,
+ DISABLE, NULL, NULL, NULL);
+
+ return ret;
+}
+
+int
+rte_pdump_set_socket_dir(const char *path, enum rte_pdump_socktype type)
+{
+ int ret, count;
+
+ if (path != NULL) {
+ if (type == RTE_PDUMP_SOCKET_SERVER) {
+ count = sizeof(server_socket_dir);
+ ret = snprintf(server_socket_dir, count, "%s", path);
+ } else {
+ count = sizeof(client_socket_dir);
+ ret = snprintf(client_socket_dir, count, "%s", path);
+ }
+
+ if (ret < 0 || ret >= count) {
+ RTE_LOG(ERR, PDUMP,
+ "Invalid socket path:%s:%d\n",
+ __func__, __LINE__);
+ if (type == RTE_PDUMP_SOCKET_SERVER)
+ server_socket_dir[0] = 0;
+ else
+ client_socket_dir[0] = 0;
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
diff --git a/lib/librte_pdump/rte_pdump.h b/lib/librte_pdump/rte_pdump.h
new file mode 100644
index 00000000..b5f4e2f3
--- /dev/null
+++ b/lib/librte_pdump/rte_pdump.h
@@ -0,0 +1,216 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_PDUMP_H_
+#define _RTE_PDUMP_H_
+
+/**
+ * @file
+ * RTE pdump
+ *
+ * Packet dump library providing packet capture support for DPDK.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_PDUMP_ALL_QUEUES UINT16_MAX
+
+enum {
+ RTE_PDUMP_FLAG_RX = 1, /* receive direction */
+ RTE_PDUMP_FLAG_TX = 2, /* transmit direction */
+ /* both receive and transmit directions */
+ RTE_PDUMP_FLAG_RXTX = (RTE_PDUMP_FLAG_RX|RTE_PDUMP_FLAG_TX)
+};
+
+enum rte_pdump_socktype {
+ RTE_PDUMP_SOCKET_SERVER = 1,
+ RTE_PDUMP_SOCKET_CLIENT = 2
+};
+
+/**
+ * Initialize packet capture handling.
+ *
+ * Creates the pthread and the server socket used to handle client
+ * requests to enable/disable the rx/tx callbacks.
+ *
+ * @param path
+ * directory path for server socket.
+ *
+ * @return
+ * 0 on success, -1 on error
+ */
+int
+rte_pdump_init(const char *path);
+
+/**
+ * Uninitialize packet capture handling.
+ *
+ * Cancels the pthread, closes the server socket and removes the
+ * server socket address.
+ *
+ * @return
+ * 0 on success, -1 on error
+ */
+int
+rte_pdump_uninit(void);
+
+/**
+ * Enables packet capturing on given port and queue.
+ *
+ * @param port
+ * port on which packet capturing should be enabled.
+ * @param queue
+ * queue of a given port on which packet capturing should be enabled.
+ * users should pass the value UINT16_MAX to enable packet capturing on all
+ * queues of a given port.
+ * @param flags
+ * one of RTE_PDUMP_FLAG_RX, RTE_PDUMP_FLAG_TX or RTE_PDUMP_FLAG_RXTX,
+ * selecting the direction(s) in which packet capturing should be enabled
+ * for the given port and queue.
+ * @param ring
+ * ring on which captured packets will be enqueued for user.
+ * @param mp
+ * mempool used to allocate the copies of the captured packets.
+ * @param filter
+ * placeholder for packet filtering.
+ *
+ * @return
+ * 0 on success, -1 on error, rte_errno is set accordingly.
+ */
+
+int
+rte_pdump_enable(uint8_t port, uint16_t queue, uint32_t flags,
+ struct rte_ring *ring,
+ struct rte_mempool *mp,
+ void *filter);
+
+/**
+ * Disables packet capturing on given port and queue.
+ *
+ * @param port
+ * port on which packet capturing should be disabled.
+ * @param queue
+ * queue of a given port on which packet capturing should be disabled.
+ * users should pass on value UINT16_MAX to disable packet capturing on all
+ * queues of a given port.
+ * @param flags
+ * one of RTE_PDUMP_FLAG_RX, RTE_PDUMP_FLAG_TX or RTE_PDUMP_FLAG_RXTX,
+ * selecting the direction(s) in which packet capturing should be disabled
+ * for the given port and queue.
+ *
+ * @return
+ * 0 on success, -1 on error, rte_errno is set accordingly.
+ */
+
+int
+rte_pdump_disable(uint8_t port, uint16_t queue, uint32_t flags);
+
+/**
+ * Enables packet capturing on given device id and queue.
+ * device_id can be name or pci address of device.
+ *
+ * @param device_id
+ * device id on which packet capturing should be enabled.
+ * @param queue
+ * queue of a given device id on which packet capturing should be enabled.
+ * users should pass the value UINT16_MAX to enable packet capturing on all
+ * queues of a given device id.
+ * @param flags
+ * one of RTE_PDUMP_FLAG_RX, RTE_PDUMP_FLAG_TX or RTE_PDUMP_FLAG_RXTX,
+ * selecting the direction(s) in which packet capturing should be enabled
+ * for the given port and queue.
+ * @param ring
+ * ring on which captured packets will be enqueued for user.
+ * @param mp
+ * mempool used to allocate the copies of the captured packets.
+ * @param filter
+ * placeholder for packet filtering.
+ *
+ * @return
+ * 0 on success, -1 on error, rte_errno is set accordingly.
+ */
+
+int
+rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
+ uint32_t flags,
+ struct rte_ring *ring,
+ struct rte_mempool *mp,
+ void *filter);
+
+/**
+ * Disables packet capturing on given device_id and queue.
+ * device_id can be name or pci address of device.
+ *
+ * @param device_id
+ * pci address or name of the device on which packet capturing
+ * should be disabled.
+ * @param queue
+ * queue of a given device on which packet capturing should be disabled.
+ * users should pass the value UINT16_MAX to disable packet capturing on all
+ * queues of a given device id.
+ * @param flags
+ * one of RTE_PDUMP_FLAG_RX, RTE_PDUMP_FLAG_TX or RTE_PDUMP_FLAG_RXTX,
+ * selecting the direction(s) in which packet capturing should be disabled
+ * for the given port and queue.
+ *
+ * @return
+ * 0 on success, -1 on error, rte_errno is set accordingly.
+ */
+int
+rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
+ uint32_t flags);
+
+/**
+ * Allows applications to set the server and client socket paths.
+ * If the specified path is NULL, the default path is selected, i.e.
+ * "/var/run" for the root user and "$HOME" for non-root users.
+ * Clients also need to call this API to set the server path if the
+ * server path differs from the default path.
+ * This API is not thread-safe.
+ *
+ * @param path
+ * directory path for server or client socket.
+ * @param type
+ * specifies RTE_PDUMP_SOCKET_SERVER if the socket path is for the server,
+ * or RTE_PDUMP_SOCKET_CLIENT if it is for the client.
+ *
+ * @return
+ * 0 on success, -EINVAL on error.
+ */
+int
+rte_pdump_set_socket_dir(const char *path, enum rte_pdump_socktype type);
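
For example, a server keeping its socket outside the defaults would call the
following (the directory is illustrative; clients pass the same path with
RTE_PDUMP_SOCKET_CLIENT):

	int ret = rte_pdump_set_socket_dir("/tmp/pdump",
		RTE_PDUMP_SOCKET_SERVER);
	if (ret != 0)
		printf("cannot set pdump socket directory (%d)\n", ret);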
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PDUMP_H_ */
diff --git a/lib/librte_pdump/rte_pdump_version.map b/lib/librte_pdump/rte_pdump_version.map
new file mode 100644
index 00000000..edec99a4
--- /dev/null
+++ b/lib/librte_pdump/rte_pdump_version.map
@@ -0,0 +1,13 @@
+DPDK_16.07 {
+ global:
+
+ rte_pdump_disable;
+ rte_pdump_disable_by_deviceid;
+ rte_pdump_enable;
+ rte_pdump_enable_by_deviceid;
+ rte_pdump_init;
+ rte_pdump_set_socket_dir;
+ rte_pdump_uninit;
+
+ local: *;
+};
diff --git a/lib/librte_pipeline/Makefile b/lib/librte_pipeline/Makefile
index 822fd41c..05d64ff8 100644
--- a/lib/librte_pipeline/Makefile
+++ b/lib/librte_pipeline/Makefile
@@ -52,7 +52,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := rte_pipeline.c
SYMLINK-$(CONFIG_RTE_LIBRTE_PIPELINE)-include += rte_pipeline.h
# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) := lib/librte_table
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_table
DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_port
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_port/Makefile b/lib/librte_port/Makefile
index 2c0ccbe5..3d84a0e4 100644
--- a/lib/librte_port/Makefile
+++ b/lib/librte_port/Makefile
@@ -44,7 +44,7 @@ CFLAGS += $(WERROR_FLAGS)
EXPORT_MAP := rte_port_version.map
-LIBABIVER := 2
+LIBABIVER := 3
#
# all source are stored in SRCS-y
@@ -56,6 +56,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_frag.c
SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_ras.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_sched.c
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_kni.c
+endif
SRCS-$(CONFIG_RTE_LIBRTE_PORT) += rte_port_source_sink.c
# install includes
@@ -67,6 +70,9 @@ SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_frag.h
SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_ras.h
endif
SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_sched.h
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_kni.h
+endif
SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_source_sink.h
# this lib depends upon:
@@ -75,5 +81,9 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_ip_frag
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_sched
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_kni
+endif
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_port/rte_port_kni.c b/lib/librte_port/rte_port_kni.c
new file mode 100644
index 00000000..08f4ac2a
--- /dev/null
+++ b/lib/librte_port/rte_port_kni.c
@@ -0,0 +1,545 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Ethan Zhuang <zhuangwj@gmail.com>.
+ * Copyright(c) 2016 Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_kni.h>
+
+#include "rte_port_kni.h"
+
+/*
+ * Port KNI Reader
+ */
+#ifdef RTE_PORT_STATS_COLLECT
+
+#define RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(port, val) \
+ port->stats.n_pkts_in += val
+#define RTE_PORT_KNI_READER_STATS_PKTS_DROP_ADD(port, val) \
+ port->stats.n_pkts_drop += val
+
+#else
+
+#define RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(port, val)
+#define RTE_PORT_KNI_READER_STATS_PKTS_DROP_ADD(port, val)
+
+#endif
+
+struct rte_port_kni_reader {
+ struct rte_port_in_stats stats;
+
+ struct rte_kni *kni;
+};
+
+static void *
+rte_port_kni_reader_create(void *params, int socket_id)
+{
+ struct rte_port_kni_reader_params *conf =
+ (struct rte_port_kni_reader_params *) params;
+ struct rte_port_kni_reader *port;
+
+ /* Check input parameters */
+ if (conf == NULL) {
+ RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->kni = conf->kni;
+
+ return port;
+}
+
+static int
+rte_port_kni_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
+{
+ struct rte_port_kni_reader *p =
+ (struct rte_port_kni_reader *) port;
+ uint16_t rx_pkt_cnt;
+
+ rx_pkt_cnt = rte_kni_rx_burst(p->kni, pkts, n_pkts);
+ RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt);
+ return rx_pkt_cnt;
+}
+
+static int
+rte_port_kni_reader_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_free(port);
+
+ return 0;
+}
+
+static int rte_port_kni_reader_stats_read(void *port,
+ struct rte_port_in_stats *stats, int clear)
+{
+ struct rte_port_kni_reader *p =
+ (struct rte_port_kni_reader *) port;
+
+ if (stats != NULL)
+ memcpy(stats, &p->stats, sizeof(p->stats));
+
+ if (clear)
+ memset(&p->stats, 0, sizeof(p->stats));
+
+ return 0;
+}
+
+/*
+ * Port KNI Writer
+ */
+#ifdef RTE_PORT_STATS_COLLECT
+
+#define RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(port, val) \
+ port->stats.n_pkts_in += val
+#define RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(port, val) \
+ port->stats.n_pkts_drop += val
+
+#else
+
+#define RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(port, val)
+#define RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(port, val)
+
+#endif
+
+struct rte_port_kni_writer {
+ struct rte_port_out_stats stats;
+
+ struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
+ uint32_t tx_burst_sz;
+ uint32_t tx_buf_count;
+ uint64_t bsz_mask;
+ struct rte_kni *kni;
+};
+
+static void *
+rte_port_kni_writer_create(void *params, int socket_id)
+{
+ struct rte_port_kni_writer_params *conf =
+ (struct rte_port_kni_writer_params *) params;
+ struct rte_port_kni_writer *port;
+
+ /* Check input parameters */
+ if ((conf == NULL) ||
+ (conf->tx_burst_sz == 0) ||
+ (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
+ (!rte_is_power_of_2(conf->tx_burst_sz))) {
+ RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->kni = conf->kni;
+ port->tx_burst_sz = conf->tx_burst_sz;
+ port->tx_buf_count = 0;
+ port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
+
+ return port;
+}
+
+static inline void
+send_burst(struct rte_port_kni_writer *p)
+{
+ uint32_t nb_tx;
+
+ nb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);
+
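+ /*
+ * Packets that rte_kni_tx_burst() could not send are dropped:
+ * account them in the port stats, then free the unsent mbufs.
+ */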
+ RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
+ for (; nb_tx < p->tx_buf_count; nb_tx++)
+ rte_pktmbuf_free(p->tx_buf[nb_tx]);
+
+ p->tx_buf_count = 0;
+}
+
+static int
+rte_port_kni_writer_tx(void *port, struct rte_mbuf *pkt)
+{
+ struct rte_port_kni_writer *p =
+ (struct rte_port_kni_writer *) port;
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+
+ return 0;
+}
+
+static int
+rte_port_kni_writer_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ struct rte_port_kni_writer *p =
+ (struct rte_port_kni_writer *) port;
+ uint64_t bsz_mask = p->bsz_mask;
+ uint32_t tx_buf_count = p->tx_buf_count;
+ uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
+ ((pkts_mask & bsz_mask) ^ bsz_mask);
+
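+ /*
+ * expr == 0 if and only if pkts_mask is a contiguous run of bits
+ * starting at bit 0 (checked by pkts_mask & (pkts_mask + 1)) that
+ * also covers bit tx_burst_sz - 1 (checked against bsz_mask), i.e.
+ * a full burst that can bypass the local buffer.
+ */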
+ if (expr == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t n_pkts_ok;
+
+ if (tx_buf_count)
+ send_burst(p);
+
+ RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
+ n_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);
+
+ RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
+ for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
+ struct rte_mbuf *pkt = pkts[n_pkts_ok];
+
+ rte_pktmbuf_free(pkt);
+ }
+ } else {
+ for (; pkts_mask;) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+
+ p->tx_buf[tx_buf_count++] = pkt;
+ RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
+ pkts_mask &= ~pkt_mask;
+ }
+
+ p->tx_buf_count = tx_buf_count;
+ if (tx_buf_count >= p->tx_burst_sz)
+ send_burst(p);
+ }
+
+ return 0;
+}
+
+static int
+rte_port_kni_writer_flush(void *port)
+{
+ struct rte_port_kni_writer *p =
+ (struct rte_port_kni_writer *) port;
+
+ if (p->tx_buf_count > 0)
+ send_burst(p);
+
+ return 0;
+}
+
+static int
+rte_port_kni_writer_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_port_kni_writer_flush(port);
+ rte_free(port);
+
+ return 0;
+}
+
+static int rte_port_kni_writer_stats_read(void *port,
+ struct rte_port_out_stats *stats, int clear)
+{
+ struct rte_port_kni_writer *p =
+ (struct rte_port_kni_writer *) port;
+
+ if (stats != NULL)
+ memcpy(stats, &p->stats, sizeof(p->stats));
+
+ if (clear)
+ memset(&p->stats, 0, sizeof(p->stats));
+
+ return 0;
+}
+
+/*
+ * Port KNI Writer Nodrop
+ */
+#ifdef RTE_PORT_STATS_COLLECT
+
+#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
+ port->stats.n_pkts_in += val
+#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
+ port->stats.n_pkts_drop += val
+
+#else
+
+#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
+#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)
+
+#endif
+
+struct rte_port_kni_writer_nodrop {
+ struct rte_port_out_stats stats;
+
+ struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
+ uint32_t tx_burst_sz;
+ uint32_t tx_buf_count;
+ uint64_t bsz_mask;
+ uint64_t n_retries;
+ struct rte_kni *kni;
+};
+
+static void *
+rte_port_kni_writer_nodrop_create(void *params, int socket_id)
+{
+ struct rte_port_kni_writer_nodrop_params *conf =
+ (struct rte_port_kni_writer_nodrop_params *) params;
+ struct rte_port_kni_writer_nodrop *port;
+
+ /* Check input parameters */
+ if ((conf == NULL) ||
+ (conf->tx_burst_sz == 0) ||
+ (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
+ (!rte_is_power_of_2(conf->tx_burst_sz))) {
+ RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ port = rte_zmalloc_socket("PORT", sizeof(*port),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
+ return NULL;
+ }
+
+ /* Initialization */
+ port->kni = conf->kni;
+ port->tx_burst_sz = conf->tx_burst_sz;
+ port->tx_buf_count = 0;
+ port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
+
+ /*
+ * When n_retries is 0, we wait for every packet to be sent, no matter
+ * how many retries it takes. To limit the number of branches in the
+ * fast path, we use UINT64_MAX instead of branching.
+ */
+ port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;
+
+ return port;
+}
+
+static inline void
+send_burst_nodrop(struct rte_port_kni_writer_nodrop *p)
+{
+ uint32_t nb_tx = 0, i;
+
+ nb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);
+
+ /* We sent all the packets on the first try */
+ if (nb_tx >= p->tx_buf_count) {
+ p->tx_buf_count = 0;
+ return;
+ }
+
+ for (i = 0; i < p->n_retries; i++) {
+ nb_tx += rte_kni_tx_burst(p->kni,
+ p->tx_buf + nb_tx,
+ p->tx_buf_count - nb_tx);
+
+ /* We sent all the packets after one or more retries */
+ if (nb_tx >= p->tx_buf_count) {
+ p->tx_buf_count = 0;
+ return;
+ }
+ }
+
+ /* We failed to send all packets within the maximum allowed attempts */
+ RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
+ for ( ; nb_tx < p->tx_buf_count; nb_tx++)
+ rte_pktmbuf_free(p->tx_buf[nb_tx]);
+
+ p->tx_buf_count = 0;
+}
+
+static int
+rte_port_kni_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
+{
+ struct rte_port_kni_writer_nodrop *p =
+ (struct rte_port_kni_writer_nodrop *) port;
+
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
+ if (p->tx_buf_count >= p->tx_burst_sz)
+ send_burst_nodrop(p);
+
+ return 0;
+}
+
+static int
+rte_port_kni_writer_nodrop_tx_bulk(void *port,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask)
+{
+ struct rte_port_kni_writer_nodrop *p =
+ (struct rte_port_kni_writer_nodrop *) port;
+
+ uint64_t bsz_mask = p->bsz_mask;
+ uint32_t tx_buf_count = p->tx_buf_count;
+ uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
+ ((pkts_mask & bsz_mask) ^ bsz_mask);
+
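+ /* Same fast-path test as in rte_port_kni_writer_tx_bulk(): expr == 0
+ * only for a contiguous mask of at least tx_burst_sz packets. */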
+ if (expr == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t n_pkts_ok;
+
+ if (tx_buf_count)
+ send_burst_nodrop(p);
+
+ RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
+ n_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);
+
+ if (n_pkts_ok >= n_pkts)
+ return 0;
+
+ /*
+ * If we didn't manage to send all packets in a single burst, move
+ * the remaining packets to the buffer and call send_burst_nodrop().
+ */
+ for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
+ struct rte_mbuf *pkt = pkts[n_pkts_ok];
+ p->tx_buf[p->tx_buf_count++] = pkt;
+ }
+ send_burst_nodrop(p);
+ } else {
+ for ( ; pkts_mask; ) {
+ uint32_t pkt_index = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ struct rte_mbuf *pkt = pkts[pkt_index];
+
+ p->tx_buf[tx_buf_count++] = pkt;
+ RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
+ pkts_mask &= ~pkt_mask;
+ }
+
+ p->tx_buf_count = tx_buf_count;
+ if (tx_buf_count >= p->tx_burst_sz)
+ send_burst_nodrop(p);
+ }
+
+ return 0;
+}
+
+static int
+rte_port_kni_writer_nodrop_flush(void *port)
+{
+ struct rte_port_kni_writer_nodrop *p =
+ (struct rte_port_kni_writer_nodrop *) port;
+
+ if (p->tx_buf_count > 0)
+ send_burst_nodrop(p);
+
+ return 0;
+}
+
+static int
+rte_port_kni_writer_nodrop_free(void *port)
+{
+ if (port == NULL) {
+ RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ rte_port_kni_writer_nodrop_flush(port);
+ rte_free(port);
+
+ return 0;
+}
+
+static int rte_port_kni_writer_nodrop_stats_read(void *port,
+ struct rte_port_out_stats *stats, int clear)
+{
+ struct rte_port_kni_writer_nodrop *p =
+ (struct rte_port_kni_writer_nodrop *) port;
+
+ if (stats != NULL)
+ memcpy(stats, &p->stats, sizeof(p->stats));
+
+ if (clear)
+ memset(&p->stats, 0, sizeof(p->stats));
+
+ return 0;
+}
+
+
+/*
+ * Summary of port operations
+ */
+struct rte_port_in_ops rte_port_kni_reader_ops = {
+ .f_create = rte_port_kni_reader_create,
+ .f_free = rte_port_kni_reader_free,
+ .f_rx = rte_port_kni_reader_rx,
+ .f_stats = rte_port_kni_reader_stats_read,
+};
+
+struct rte_port_out_ops rte_port_kni_writer_ops = {
+ .f_create = rte_port_kni_writer_create,
+ .f_free = rte_port_kni_writer_free,
+ .f_tx = rte_port_kni_writer_tx,
+ .f_tx_bulk = rte_port_kni_writer_tx_bulk,
+ .f_flush = rte_port_kni_writer_flush,
+ .f_stats = rte_port_kni_writer_stats_read,
+};
+
+struct rte_port_out_ops rte_port_kni_writer_nodrop_ops = {
+ .f_create = rte_port_kni_writer_nodrop_create,
+ .f_free = rte_port_kni_writer_nodrop_free,
+ .f_tx = rte_port_kni_writer_nodrop_tx,
+ .f_tx_bulk = rte_port_kni_writer_nodrop_tx_bulk,
+ .f_flush = rte_port_kni_writer_nodrop_flush,
+ .f_stats = rte_port_kni_writer_nodrop_stats_read,
+};
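
A usage sketch, not part of this patch: the pipeline/port framework normally
drives these ops, and "kni" is assumed to be a handle already created with
rte_kni_alloc():

	#include <rte_port_kni.h>

	static int
	make_kni_ports(struct rte_kni *kni, void **in, void **out)
	{
		struct rte_port_kni_reader_params rp = { .kni = kni };
		struct rte_port_kni_writer_params wp = {
			.kni = kni,
			/* power of two, at most RTE_PORT_IN_BURST_SIZE_MAX */
			.tx_burst_sz = 32,
		};

		*in = rte_port_kni_reader_ops.f_create(&rp, rte_socket_id());
		*out = rte_port_kni_writer_ops.f_create(&wp, rte_socket_id());
		return (*in != NULL && *out != NULL) ? 0 : -1;
	}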
diff --git a/lib/librte_port/rte_port_kni.h b/lib/librte_port/rte_port_kni.h
new file mode 100644
index 00000000..4b60689c
--- /dev/null
+++ b/lib/librte_port/rte_port_kni.h
@@ -0,0 +1,95 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Ethan Zhuang <zhuangwj@gmail.com>.
+ * Copyright(c) 2016 Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_RTE_PORT_KNI_H__
+#define __INCLUDE_RTE_PORT_KNI_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Port KNI Interface
+ *
+ * kni_reader: input port built on top of pre-initialized KNI interface
+ * kni_writer: output port built on top of pre-initialized KNI interface
+ *
+ ***/
+
+#include <stdint.h>
+
+#include <rte_kni.h>
+
+#include "rte_port.h"
+
+/** kni_reader port parameters */
+struct rte_port_kni_reader_params {
+ /** KNI interface reference */
+ struct rte_kni *kni;
+};
+
+/** kni_reader port operations */
+extern struct rte_port_in_ops rte_port_kni_reader_ops;
+
+
+/** kni_writer port parameters */
+struct rte_port_kni_writer_params {
+ /** KNI interface reference */
+ struct rte_kni *kni;
+ /** Burst size to KNI interface. */
+ uint32_t tx_burst_sz;
+};
+
+/** kni_writer port operations */
+extern struct rte_port_out_ops rte_port_kni_writer_ops;
+
+/** kni_writer_nodrop port parameters */
+struct rte_port_kni_writer_nodrop_params {
+ /** KNI interface reference */
+ struct rte_kni *kni;
+ /** Burst size to KNI interface. */
+ uint32_t tx_burst_sz;
+ /** Maximum number of retries, 0 for no limit */
+ uint32_t n_retries;
+};
+
+/** kni_writer_nodrop port operations */
+extern struct rte_port_out_ops rte_port_kni_writer_nodrop_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_port/rte_port_source_sink.c b/lib/librte_port/rte_port_source_sink.c
index 056c9756..4cad7109 100644
--- a/lib/librte_port/rte_port_source_sink.c
+++ b/lib/librte_port/rte_port_source_sink.c
@@ -38,17 +38,11 @@
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#ifdef RTE_NEXT_ABI
-
#ifdef RTE_PORT_PCAP
#include <rte_ether.h>
#include <pcap.h>
#endif
-#else
-#undef RTE_PORT_PCAP
-#endif
-
#include "rte_port_source_sink.h"
/*
@@ -81,8 +75,6 @@ struct rte_port_source {
uint32_t pkt_index;
};
-#ifdef RTE_NEXT_ABI
-
#ifdef RTE_PORT_PCAP
static int
@@ -232,8 +224,6 @@ error_exit:
#endif /* RTE_PORT_PCAP */
-#endif /* RTE_NEXT_ABI */
-
static void *
rte_port_source_create(void *params, int socket_id)
{
@@ -258,8 +248,6 @@ rte_port_source_create(void *params, int socket_id)
/* Initialization */
port->mempool = (struct rte_mempool *) p->mempool;
-#ifdef RTE_NEXT_ABI
-
if (p->file_name) {
int status = PCAP_SOURCE_LOAD(port, p->file_name,
p->n_bytes_per_pkt, socket_id);
@@ -270,8 +258,6 @@ rte_port_source_create(void *params, int socket_id)
}
}
-#endif
-
return port;
}
diff --git a/lib/librte_port/rte_port_source_sink.h b/lib/librte_port/rte_port_source_sink.h
index 917abe4f..4db8a8a8 100644
--- a/lib/librte_port/rte_port_source_sink.h
+++ b/lib/librte_port/rte_port_source_sink.h
@@ -53,7 +53,6 @@ extern "C" {
struct rte_port_source_params {
/** Pre-initialized buffer pool */
struct rte_mempool *mempool;
-#ifdef RTE_NEXT_ABI
/** The full path of the pcap file to read packets from */
char *file_name;
@@ -62,8 +61,6 @@ struct rte_port_source_params {
* if it is bigger than packet size, the generated packets
* will contain the whole packet */
uint32_t n_bytes_per_pkt;
-
-#endif
};
/** source port operations */
diff --git a/lib/librte_port/rte_port_version.map b/lib/librte_port/rte_port_version.map
index 7a0b34d0..048c20d7 100644
--- a/lib/librte_port/rte_port_version.map
+++ b/lib/librte_port/rte_port_version.map
@@ -5,10 +5,8 @@ DPDK_2.0 {
rte_port_ethdev_writer_ops;
rte_port_ring_reader_ipv4_frag_ops;
rte_port_ring_reader_ops;
- rte_port_ring_reader_ops;
rte_port_ring_writer_ipv4_ras_ops;
rte_port_ring_writer_ops;
- rte_port_ring_writer_ops;
rte_port_sched_reader_ops;
rte_port_sched_writer_ops;
rte_port_sink_ops;
@@ -35,3 +33,12 @@ DPDK_2.2 {
rte_port_ring_multi_writer_nodrop_ops;
} DPDK_2.1;
+
+DPDK_16.07 {
+ global:
+
+ rte_port_kni_reader_ops;
+ rte_port_kni_writer_ops;
+ rte_port_kni_writer_nodrop_ops;
+
+} DPDK_2.2;
diff --git a/lib/librte_power/guest_channel.c b/lib/librte_power/guest_channel.c
index d6b6d0aa..85c92fab 100644
--- a/lib/librte_power/guest_channel.c
+++ b/lib/librte_power/guest_channel.c
@@ -103,8 +103,10 @@ guest_channel_host_connect(const char *path, unsigned lcore_id)
global_fds[lcore_id] = fd;
ret = guest_channel_send_msg(&pkt, lcore_id);
if (ret != 0) {
- RTE_LOG(ERR, GUEST_CHANNEL, "Error on channel '%s' communications "
- "test: %s\n", fd_path, strerror(ret));
+ RTE_LOG(ERR, GUEST_CHANNEL,
+ "Error on channel '%s' communications test: %s\n",
+ fd_path, ret > 0 ? strerror(ret) :
+ "channel not connected");
goto error;
}
RTE_LOG(INFO, GUEST_CHANNEL, "Channel '%s' is now connected\n", fd_path);
diff --git a/lib/librte_power/rte_power_kvm_vm.c b/lib/librte_power/rte_power_kvm_vm.c
index 7bb2774c..a1badf34 100644
--- a/lib/librte_power/rte_power_kvm_vm.c
+++ b/lib/librte_power/rte_power_kvm_vm.c
@@ -106,7 +106,8 @@ send_msg(unsigned lcore_id, uint32_t scale_direction)
ret = guest_channel_send_msg(&pkt[lcore_id], lcore_id);
if (ret == 0)
return 1;
- RTE_LOG(DEBUG, POWER, "Error sending message: %s\n", strerror(ret));
+ RTE_LOG(DEBUG, POWER, "Error sending message: %s\n",
+ ret > 0 ? strerror(ret) : "channel not connected");
return -1;
}
diff --git a/lib/librte_reorder/Makefile b/lib/librte_reorder/Makefile
index 0c01de16..0d111aad 100644
--- a/lib/librte_reorder/Makefile
+++ b/lib/librte_reorder/Makefile
@@ -49,6 +49,7 @@ SYMLINK-$(CONFIG_RTE_LIBRTE_REORDER)-include := rte_reorder.h
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_REORDER) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_REORDER) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_REORDER) += lib/librte_eal
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index d80faf3b..ca0a1082 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -122,6 +122,8 @@ int
rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
unsigned flags)
{
+ int ret;
+
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
@@ -140,7 +142,9 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* init the ring structure */
memset(r, 0, sizeof(*r));
- snprintf(r->name, sizeof(r->name), "%s", name);
+ ret = snprintf(r->name, sizeof(r->name), "%s", name);
+ if (ret < 0 || ret >= (int)sizeof(r->name))
+ return -ENAMETOOLONG;
r->flags = flags;
r->prod.watermark = count;
r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
@@ -165,6 +169,7 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
ssize_t ring_size;
int mz_flags = 0;
struct rte_ring_list* ring_list = NULL;
+ int ret;
ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
@@ -174,6 +179,13 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
return NULL;
}
+ ret = snprintf(mz_name, sizeof(mz_name), "%s%s",
+ RTE_RING_MZ_PREFIX, name);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ rte_errno = ENAMETOOLONG;
+ return NULL;
+ }
+
te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
if (te == NULL) {
RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
@@ -181,8 +193,6 @@ rte_ring_create(const char *name, unsigned count, int socket_id,
return NULL;
}
- snprintf(mz_name, sizeof(mz_name), "%s%s", RTE_RING_MZ_PREFIX, name);
-
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
/* reserve a memory zone for this ring. If we can't get rte_config or
diff --git a/lib/librte_sched/Makefile b/lib/librte_sched/Makefile
index a782cd1a..44cb780f 100644
--- a/lib/librte_sched/Makefile
+++ b/lib/librte_sched/Makefile
@@ -59,6 +59,7 @@ SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include := rte_sched.h rte_bitmap.h rte_sched
SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include += rte_reciprocal.h
# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_net lib/librte_timer
diff --git a/lib/librte_sched/rte_red.c b/lib/librte_sched/rte_red.c
index fdf40576..ade57d1f 100644
--- a/lib/librte_sched/rte_red.c
+++ b/lib/librte_sched/rte_red.c
@@ -77,7 +77,7 @@ __rte_red_init_tables(void)
scale = 1024.0;
- RTE_RED_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
+ RTE_ASSERT(RTE_RED_WQ_LOG2_NUM == RTE_DIM(rte_red_log2_1_minus_Wq));
for (i = RTE_RED_WQ_LOG2_MIN; i <= RTE_RED_WQ_LOG2_MAX; i++) {
double n = (double)i;
diff --git a/lib/librte_sched/rte_red.h b/lib/librte_sched/rte_red.h
index 7f9ac901..ca122275 100644
--- a/lib/librte_sched/rte_red.h
+++ b/lib/librte_sched/rte_red.h
@@ -63,19 +63,6 @@ extern "C" {
#define RTE_RED_INT16_NBITS (sizeof(uint16_t) * CHAR_BIT)
#define RTE_RED_WQ_LOG2_NUM (RTE_RED_WQ_LOG2_MAX - RTE_RED_WQ_LOG2_MIN + 1)
-#ifdef RTE_RED_DEBUG
-
-#define RTE_RED_ASSERT(exp) \
-if (!(exp)) { \
- rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
-}
-
-#else
-
-#define RTE_RED_ASSERT(exp) do { } while(0)
-
-#endif /* RTE_RED_DEBUG */
-
/**
* Externs
*
@@ -246,8 +233,8 @@ rte_red_enqueue_empty(const struct rte_red_config *red_cfg,
{
uint64_t time_diff = 0, m = 0;
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
red->count ++;
@@ -361,8 +348,8 @@ rte_red_enqueue_nonempty(const struct rte_red_config *red_cfg,
struct rte_red *red,
const unsigned q)
{
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
/**
* EWMA filter (Sally Floyd and Van Jacobson):
@@ -424,8 +411,8 @@ rte_red_enqueue(const struct rte_red_config *red_cfg,
const unsigned q,
const uint64_t time)
{
- RTE_RED_ASSERT(red_cfg != NULL);
- RTE_RED_ASSERT(red != NULL);
+ RTE_ASSERT(red_cfg != NULL);
+ RTE_ASSERT(red != NULL);
if (q != 0) {
return rte_red_enqueue_nonempty(red_cfg, red, q);
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 1609ea87..86964234 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -1084,10 +1084,17 @@ rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex
s->stats.n_bytes_tc[tc_index] += pkt_len;
}
+#ifdef RTE_SCHED_RED
static inline void
rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
- uint32_t qindex,
- struct rte_mbuf *pkt, uint32_t red)
+ uint32_t qindex,
+ struct rte_mbuf *pkt, uint32_t red)
+#else
+static inline void
+rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
+ uint32_t qindex,
+ struct rte_mbuf *pkt, __rte_unused uint32_t red)
+#endif
{
struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
uint32_t tc_index = (qindex >> 2) & 0x3;
@@ -1110,10 +1117,17 @@ rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex,
qe->stats.n_bytes += pkt_len;
}
+#ifdef RTE_SCHED_RED
static inline void
rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
- uint32_t qindex,
- struct rte_mbuf *pkt, uint32_t red)
+ uint32_t qindex,
+ struct rte_mbuf *pkt, uint32_t red)
+#else
+static inline void
+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
+ uint32_t qindex,
+ struct rte_mbuf *pkt, __rte_unused uint32_t red)
+#endif
{
struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
uint32_t pkt_len = pkt->pkt_len;
diff --git a/lib/librte_table/rte_table_acl.c b/lib/librte_table/rte_table_acl.c
index c1eb8488..8f1f8ceb 100644
--- a/lib/librte_table/rte_table_acl.c
+++ b/lib/librte_table/rte_table_acl.c
@@ -236,8 +236,6 @@ rte_table_acl_build(struct rte_table_acl *acl, struct rte_acl_ctx **acl_ctx)
return -1;
}
- rte_acl_dump(ctx);
-
*acl_ctx = ctx;
return 0;
}
diff --git a/lib/librte_table/rte_table_lpm.c b/lib/librte_table/rte_table_lpm.c
index cdeb0f5a..598b79f5 100644
--- a/lib/librte_table/rte_table_lpm.c
+++ b/lib/librte_table/rte_table_lpm.c
@@ -44,7 +44,9 @@
#include "rte_table_lpm.h"
-#define RTE_TABLE_LPM_MAX_NEXT_HOPS 256
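+/* Allow the application to override the next-hop table size at build time. */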
+#ifndef RTE_TABLE_LPM_MAX_NEXT_HOPS
+#define RTE_TABLE_LPM_MAX_NEXT_HOPS 65536
+#endif
#ifdef RTE_TABLE_STATS_COLLECT
@@ -74,7 +76,7 @@ struct rte_table_lpm {
/* Next Hop Table (NHT) */
uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
- uint32_t nht[0] __rte_cache_aligned;
+ uint8_t nht[0] __rte_cache_aligned;
};
static void *
@@ -188,7 +190,7 @@ nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
uint32_t i;
for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
- uint32_t *nht_entry = &lpm->nht[i * lpm->entry_size];
+ uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];
if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
lpm->entry_unique_size) == 0)) {
@@ -242,7 +244,7 @@ rte_table_lpm_entry_add(
/* Find existing or free NHT entry */
if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
- uint32_t *nht_entry;
+ uint8_t *nht_entry;
if (nht_find_free(lpm, &nht_pos) == 0) {
RTE_LOG(ERR, TABLE, "%s: NHT full\n", __func__);
diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile
index e33ff53e..538adb0b 100644
--- a/lib/librte_vhost/Makefile
+++ b/lib/librte_vhost/Makefile
@@ -36,7 +36,7 @@ LIB = librte_vhost.a
EXPORT_MAP := rte_vhost_version.map
-LIBABIVER := 2
+LIBABIVER := 3
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -D_FILE_OFFSET_BITS=64
ifeq ($(CONFIG_RTE_LIBRTE_VHOST_USER),y)
@@ -66,6 +66,7 @@ SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_virtio_net.h
DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_eal
DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_mempool
DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_net
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 3d8709e5..5ceaa8a5 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -20,3 +20,13 @@ DPDK_2.1 {
rte_vhost_driver_unregister;
} DPDK_2.0;
+
+DPDK_16.07 {
+ global:
+
+ rte_vhost_avail_entries;
+ rte_vhost_get_ifname;
+ rte_vhost_get_numa_node;
+ rte_vhost_get_queue_num;
+
+} DPDK_2.1;
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 600b20b4..9caa6221 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -51,125 +51,12 @@
#include <rte_mempool.h>
#include <rte_ether.h>
-struct rte_mbuf;
-
-#define VHOST_MEMORY_MAX_NREGIONS 8
-
-/* Used to indicate that the device is running on a data core */
-#define VIRTIO_DEV_RUNNING 1
-
-/* Backend value set by guest. */
-#define VIRTIO_DEV_STOPPED -1
-
+#define RTE_VHOST_USER_CLIENT (1ULL << 0)
+#define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1)
/* Enum for virtqueue management. */
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
-#define BUF_VECTOR_MAX 256
-
-/**
- * Structure contains buffer address, length and descriptor index
- * from vring to do scatter RX.
- */
-struct buf_vector {
- uint64_t buf_addr;
- uint32_t buf_len;
- uint32_t desc_idx;
-};
-
-/**
- * Structure contains variables relevant to RX/TX virtqueues.
- */
-struct vhost_virtqueue {
- struct vring_desc *desc; /**< Virtqueue descriptor ring. */
- struct vring_avail *avail; /**< Virtqueue available ring. */
- struct vring_used *used; /**< Virtqueue used ring. */
- uint32_t size; /**< Size of descriptor ring. */
- uint32_t backend; /**< Backend value to determine if device should started/stopped. */
- uint16_t vhost_hlen; /**< Vhost header length (varies depending on RX merge buffers. */
- volatile uint16_t last_used_idx; /**< Last index used on the available ring */
- volatile uint16_t last_used_idx_res; /**< Used for multiple devices reserving buffers. */
-#define VIRTIO_INVALID_EVENTFD (-1)
-#define VIRTIO_UNINITIALIZED_EVENTFD (-2)
- int callfd; /**< Used to notify the guest (trigger interrupt). */
- int kickfd; /**< Currently unused as polling mode is enabled. */
- int enabled;
- uint64_t log_guest_addr; /**< Physical address of used ring, for logging */
- uint64_t reserved[15]; /**< Reserve some spaces for future extension. */
- struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
-} __rte_cache_aligned;
-
-/* Old kernels have no such macro defined */
-#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
- #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
-#endif
-
-
-/*
- * Make an extra wrapper for VIRTIO_NET_F_MQ and
- * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX as they are
- * introduced since kernel v3.8. This makes our
- * code buildable for older kernel.
- */
-#ifdef VIRTIO_NET_F_MQ
- #define VHOST_MAX_QUEUE_PAIRS VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX
- #define VHOST_SUPPORTS_MQ (1ULL << VIRTIO_NET_F_MQ)
-#else
- #define VHOST_MAX_QUEUE_PAIRS 1
- #define VHOST_SUPPORTS_MQ 0
-#endif
-
-/*
- * Define virtio 1.0 for older kernels
- */
-#ifndef VIRTIO_F_VERSION_1
- #define VIRTIO_F_VERSION_1 32
-#endif
-
-/**
- * Device structure contains all configuration information relating to the device.
- */
-struct virtio_net {
- struct virtio_memory *mem; /**< QEMU memory and memory region information. */
- uint64_t features; /**< Negotiated feature set. */
- uint64_t protocol_features; /**< Negotiated protocol feature set. */
- uint64_t device_fh; /**< device identifier. */
- uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
-#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
- char ifname[IF_NAME_SZ]; /**< Name of the tap device or socket path. */
- uint32_t virt_qp_nb; /**< number of queue pair we have allocated */
- void *priv; /**< private context */
- uint64_t log_size; /**< Size of log area */
- uint64_t log_base; /**< Where dirty pages are logged */
- struct ether_addr mac; /**< MAC address */
- rte_atomic16_t broadcast_rarp; /**< A flag to tell if we need broadcast rarp packet */
- uint64_t reserved[61]; /**< Reserve some spaces for future extension. */
- struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2]; /**< Contains all virtqueue information. */
-} __rte_cache_aligned;
-
-/**
- * Information relating to memory regions including offsets to addresses in QEMUs memory file.
- */
-struct virtio_memory_regions {
- uint64_t guest_phys_address; /**< Base guest physical address of region. */
- uint64_t guest_phys_address_end; /**< End guest physical address of region. */
- uint64_t memory_size; /**< Size of region. */
- uint64_t userspace_address; /**< Base userspace address of region. */
- uint64_t address_offset; /**< Offset of region for address translation. */
-};
-
-
-/**
- * Memory structure includes region and mapping information.
- */
-struct virtio_memory {
- uint64_t base_address; /**< Base QEMU userspace address of the memory file. */
- uint64_t mapped_address; /**< Mapped address of memory file base in our applications memory space. */
- uint64_t mapped_size; /**< Total size of memory file. */
- uint32_t nregions; /**< Number of memory regions. */
- struct virtio_memory_regions regions[0]; /**< Memory region information. */
-};
-
/**
* Device and vring operations.
*
@@ -178,45 +65,13 @@ struct virtio_memory {
*
*/
struct virtio_net_device_ops {
- int (*new_device)(struct virtio_net *); /**< Add device. */
- void (*destroy_device)(volatile struct virtio_net *); /**< Remove device. */
-
- int (*vring_state_changed)(struct virtio_net *dev, uint16_t queue_id, int enable); /**< triggered when a vring is enabled or disabled */
-};
-
-static inline uint16_t __attribute__((always_inline))
-rte_vring_available_entries(struct virtio_net *dev, uint16_t queue_id)
-{
- struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
-
- if (!vq->enabled)
- return 0;
+ int (*new_device)(int vid); /**< Add device. */
+ void (*destroy_device)(int vid); /**< Remove device. */
- return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx_res;
-}
-
-/**
- * Function to convert guest physical addresses to vhost virtual addresses.
- * This is used to convert guest virtio buffer addresses.
- */
-static inline uint64_t __attribute__((always_inline))
-gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
-{
- struct virtio_memory_regions *region;
- uint32_t regionidx;
- uint64_t vhost_va = 0;
-
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- region = &dev->mem->regions[regionidx];
- if ((guest_pa >= region->guest_phys_address) &&
- (guest_pa <= region->guest_phys_address_end)) {
- vhost_va = region->address_offset + guest_pa;
- break;
- }
- }
- return vhost_va;
-}
+ int (*vring_state_changed)(int vid, uint16_t queue_id, int enable); /**< triggered when a vring is enabled or disabled */
+ void *reserved[5]; /**< Reserved for future extension */
+};
/**
* Disable features in feature_mask. Returns 0 on success.
@@ -231,13 +86,16 @@ int rte_vhost_feature_enable(uint64_t feature_mask);
/* Returns currently supported vhost features */
uint64_t rte_vhost_feature_get(void);
-int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable);
+int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);
-/* Register vhost driver. dev_name could be different for multiple instance support. */
-int rte_vhost_driver_register(const char *dev_name);
+/**
+ * Register vhost driver. path could be different for multiple
+ * instance support.
+ */
+int rte_vhost_driver_register(const char *path, uint64_t flags);
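
With the new flags argument, a vhost-user application can, for example,
request client mode (the socket path is illustrative; passing 0 keeps the
previous server-mode behaviour):

	rte_vhost_driver_register("/tmp/vhost.sock",
		RTE_VHOST_USER_CLIENT | RTE_VHOST_USER_NO_RECONNECT);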
/* Unregister vhost driver. This is only meaningful to vhost user. */
-int rte_vhost_driver_unregister(const char *dev_name);
+int rte_vhost_driver_unregister(const char *path);
/* Register callbacks. */
int rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const);
@@ -245,12 +103,65 @@ int rte_vhost_driver_callback_register(struct virtio_net_device_ops const * cons
int rte_vhost_driver_session_start(void);
/**
+ * Get the numa node from which the virtio net device's memory
+ * is allocated.
+ *
+ * @param vid
+ * virtio-net device ID
+ *
+ * @return
+ * The numa node, -1 on failure
+ */
+int rte_vhost_get_numa_node(int vid);
+
+/**
+ * Get the number of queues the device supports.
+ *
+ * @param vid
+ * virtio-net device ID
+ *
+ * @return
+ * The number of queues, 0 on failure
+ */
+uint32_t rte_vhost_get_queue_num(int vid);
+
+/**
+ * Get the virtio net device's ifname. For vhost-cuse, ifname is the
+ * path of the char device. For vhost-user, ifname is the vhost-user
+ * socket file path.
+ *
+ * @param vid
+ * virtio-net device ID
+ * @param buf
+ * The buffer to store the queried ifname
+ * @param len
+ * The length of buf
+ *
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_get_ifname(int vid, char *buf, size_t len);
+
+/**
+ * Get how many avail entries are left in the queue
+ *
+ * @param vid
+ * virtio-net device ID
+ * @param queue_id
+ * virtio queue index
+ *
+ * @return
+ * num of avail entries left
+ */
+uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
+
+/**
* This function adds buffers to the virtio device's RX virtqueue. Buffers can
* be received from the physical port or from another virtual device. A packet
* count is returned to indicate the number of packets that were successfully
* added to the RX queue.
- * @param dev
- * virtio-net device
+ * @param vid
+ * virtio-net device ID
* @param queue_id
* virtio queue index in mq case
* @param pkts
@@ -260,14 +171,14 @@ int rte_vhost_driver_session_start(void);
* @return
* num of packets enqueued
*/
-uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count);
/**
* This function gets guest buffers from the virtio device TX virtqueue,
* construct host mbufs, copies guest buffer content to host mbufs and
* store them in pkts to be processed.
- * @param dev
+ * @param vid
* virtio-net device ID
* @param queue_id
* virtio queue index in mq case
@@ -280,7 +191,7 @@ uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
* @return
* num of packets dequeued
*/
-uint16_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
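
A minimal sketch of the new vid-based flow, not part of this patch (names
hypothetical, error handling elided):

	#include <stdio.h>
	#include <rte_virtio_net.h>

	static int
	my_new_device(int vid)
	{
		char ifname[64];

		if (rte_vhost_get_ifname(vid, ifname, sizeof(ifname)) == 0)
			printf("vhost device %d on %s (%u queues)\n",
				vid, ifname, rte_vhost_get_queue_num(vid));
		return 0;
	}

	static void
	my_destroy_device(int vid)
	{
		printf("vhost device %d removed\n", vid);
	}

	static const struct virtio_net_device_ops vhost_ops = {
		.new_device = my_new_device,
		.destroy_device = my_destroy_device,
	};

	/* Registered with rte_vhost_driver_callback_register(&vhost_ops)
	 * before rte_vhost_driver_session_start(). */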
#endif /* _VIRTIO_NET_H_ */
diff --git a/lib/librte_vhost/vhost-net.h b/lib/librte_vhost/vhost-net.h
index f193a1f6..38593a29 100644
--- a/lib/librte_vhost/vhost-net.h
+++ b/lib/librte_vhost/vhost-net.h
@@ -43,6 +43,128 @@
#include "rte_virtio_net.h"
+/* Used to indicate that the device is running on a data core */
+#define VIRTIO_DEV_RUNNING 1
+
+/* Backend value set by guest. */
+#define VIRTIO_DEV_STOPPED -1
+
+#define BUF_VECTOR_MAX 256
+
+/**
+ * Structure contains buffer address, length and descriptor index
+ * from vring to do scatter RX.
+ */
+struct buf_vector {
+ uint64_t buf_addr;
+ uint32_t buf_len;
+ uint32_t desc_idx;
+};
+
+/**
+ * Structure contains variables relevant to RX/TX virtqueues.
+ */
+struct vhost_virtqueue {
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ uint32_t size;
+
+ /* Last index used on the available ring */
+ volatile uint16_t last_used_idx;
+#define VIRTIO_INVALID_EVENTFD (-1)
+#define VIRTIO_UNINITIALIZED_EVENTFD (-2)
+
+ /* Backend value to determine if the device should be started/stopped */
+ int backend;
+ /* Used to notify the guest (trigger interrupt) */
+ int callfd;
+ /* Currently unused as polling mode is enabled */
+ int kickfd;
+ int enabled;
+
+ /* Physical address of used ring, for logging */
+ uint64_t log_guest_addr;
+} __rte_cache_aligned;
+
+/* Old kernels have no such macro defined */
+#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
+ #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
+#endif
+
+
+/*
+ * Make an extra wrapper for VIRTIO_NET_F_MQ and
+ * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX as they are
+ * introduced since kernel v3.8. This makes our
+ * code buildable for older kernel.
+ */
+#ifdef VIRTIO_NET_F_MQ
+ #define VHOST_MAX_QUEUE_PAIRS VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX
+ #define VHOST_SUPPORTS_MQ (1ULL << VIRTIO_NET_F_MQ)
+#else
+ #define VHOST_MAX_QUEUE_PAIRS 1
+ #define VHOST_SUPPORTS_MQ 0
+#endif
+
+/*
+ * Define virtio 1.0 for older kernels
+ */
+#ifndef VIRTIO_F_VERSION_1
+ #define VIRTIO_F_VERSION_1 32
+#endif
+
+/**
+ * Device structure contains all configuration information relating
+ * to the device.
+ */
+struct virtio_net {
+ /* Frontend (QEMU) memory and memory region information */
+ struct virtio_memory *mem;
+ uint64_t features;
+ uint64_t protocol_features;
+ int vid;
+ uint32_t flags;
+ uint16_t vhost_hlen;
+ /* to tell if we need broadcast rarp packet */
+ rte_atomic16_t broadcast_rarp;
+ uint32_t virt_qp_nb;
+ struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
+#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
+ char ifname[IF_NAME_SZ];
+ uint64_t log_size;
+ uint64_t log_base;
+ uint64_t log_addr;
+ struct ether_addr mac;
+
+} __rte_cache_aligned;
+
+/**
+ * Information relating to memory regions including offsets to
+ * addresses in QEMUs memory file.
+ */
+struct virtio_memory_regions {
+ uint64_t guest_phys_address;
+ uint64_t guest_phys_address_end;
+ uint64_t memory_size;
+ uint64_t userspace_address;
+ uint64_t address_offset;
+};
+
+
+/**
+ * Memory structure includes region and mapping information.
+ */
+struct virtio_memory {
+ /* Base QEMU userspace address of the memory file. */
+ uint64_t base_address;
+ uint64_t mapped_address;
+ uint64_t mapped_size;
+ uint32_t nregions;
+ struct virtio_memory_regions regions[0];
+};
+
+
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
@@ -57,9 +179,9 @@
char packet[VHOST_MAX_PRINT_BUFF]; \
\
if ((header)) \
- snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%" PRIu64 ") Header size %d: ", (device->device_fh), (size)); \
+ snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
else \
- snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%" PRIu64 ") Packet size %d: ", (device->device_fh), (size)); \
+ snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
for (index = 0; index < (size); index++) { \
snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
"%02hhx ", pkt_addr[index]); \
@@ -74,37 +196,51 @@
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
-
-/*
- * Structure used to identify device context.
+/**
+ * Function to convert guest physical addresses to vhost virtual addresses.
+ * This is used to convert guest virtio buffer addresses.
*/
-struct vhost_device_ctx {
- pid_t pid; /* PID of process calling the IOCTL. */
- uint64_t fh; /* Populated with fi->fh to track the device index. */
-};
-
-int vhost_new_device(struct vhost_device_ctx);
-void vhost_destroy_device(struct vhost_device_ctx);
-
-void vhost_set_ifname(struct vhost_device_ctx,
- const char *if_name, unsigned int if_len);
-
-int vhost_get_features(struct vhost_device_ctx, uint64_t *);
-int vhost_set_features(struct vhost_device_ctx, uint64_t *);
-
-int vhost_set_vring_num(struct vhost_device_ctx, struct vhost_vring_state *);
-int vhost_set_vring_addr(struct vhost_device_ctx, struct vhost_vring_addr *);
-int vhost_set_vring_base(struct vhost_device_ctx, struct vhost_vring_state *);
-int vhost_get_vring_base(struct vhost_device_ctx,
- uint32_t, struct vhost_vring_state *);
-
-int vhost_set_vring_kick(struct vhost_device_ctx, struct vhost_vring_file *);
-int vhost_set_vring_call(struct vhost_device_ctx, struct vhost_vring_file *);
-
-int vhost_set_backend(struct vhost_device_ctx, struct vhost_vring_file *);
-
-int vhost_set_owner(struct vhost_device_ctx);
-int vhost_reset_owner(struct vhost_device_ctx);
+static inline uint64_t __attribute__((always_inline))
+gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
+{
+ struct virtio_memory_regions *region;
+ uint32_t regionidx;
+ uint64_t vhost_va = 0;
+
+ for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
+ region = &dev->mem->regions[regionidx];
+ if ((guest_pa >= region->guest_phys_address) &&
+ (guest_pa <= region->guest_phys_address_end)) {
+ vhost_va = region->address_offset + guest_pa;
+ break;
+ }
+ }
+ return vhost_va;
+}
+
+struct virtio_net_device_ops const *notify_ops;
+struct virtio_net *get_device(int vid);
+
+int vhost_new_device(void);
+void vhost_destroy_device(int);
+
+void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
+
+int vhost_get_features(int, uint64_t *);
+int vhost_set_features(int, uint64_t *);
+
+int vhost_set_vring_num(int, struct vhost_vring_state *);
+int vhost_set_vring_addr(int, struct vhost_vring_addr *);
+int vhost_set_vring_base(int, struct vhost_vring_state *);
+int vhost_get_vring_base(int, uint32_t, struct vhost_vring_state *);
+
+int vhost_set_vring_kick(int, struct vhost_vring_file *);
+int vhost_set_vring_call(int, struct vhost_vring_file *);
+
+int vhost_set_backend(int, struct vhost_vring_file *);
+
+int vhost_set_owner(int);
+int vhost_reset_owner(int);
/*
* Backend-specific cleanup. Defined by vhost-cuse and vhost-user.
diff --git a/lib/librte_vhost/vhost_cuse/vhost-net-cdev.c b/lib/librte_vhost/vhost_cuse/vhost-net-cdev.c
index c613e68e..5d150116 100644
--- a/lib/librte_vhost/vhost_cuse/vhost-net-cdev.c
+++ b/lib/librte_vhost/vhost_cuse/vhost-net-cdev.c
@@ -60,17 +60,18 @@ static const char default_cdev[] = "vhost-net";
static struct fuse_session *session;
/*
- * Returns vhost_device_ctx from given fuse_req_t. The index is populated later
- * when the device is added to the device linked list.
+ * Returns vhost_cuse_device_ctx from given fuse_req_t. The
+ * index is populated later when the device is added to the
+ * device linked list.
*/
-static struct vhost_device_ctx
+static struct vhost_cuse_device_ctx
fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
{
- struct vhost_device_ctx ctx;
+ struct vhost_cuse_device_ctx ctx;
struct fuse_ctx const *const req_ctx = fuse_req_ctx(req);
ctx.pid = req_ctx->pid;
- ctx.fh = fi->fh;
+ ctx.vid = (int)fi->fh;
return ctx;
}
@@ -82,19 +83,18 @@ fuse_req_to_vhost_ctx(fuse_req_t req, struct fuse_file_info *fi)
static void
vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
{
- struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
- int err = 0;
+ int vid = 0;
- err = vhost_new_device(ctx);
- if (err == -1) {
+ vid = vhost_new_device();
+ if (vid == -1) {
fuse_reply_err(req, EPERM);
return;
}
- fi->fh = err;
+ fi->fh = vid;
RTE_LOG(INFO, VHOST_CONFIG,
- "(%"PRIu64") Device configuration started\n", fi->fh);
+ "(%d) device configuration started\n", vid);
fuse_reply_open(req, fi);
}
@@ -105,19 +105,19 @@ static void
vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
{
int err = 0;
- struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
+ struct vhost_cuse_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
- vhost_destroy_device(ctx);
- RTE_LOG(INFO, VHOST_CONFIG, "(%"PRIu64") Device released\n", ctx.fh);
+ vhost_destroy_device(ctx.vid);
+ RTE_LOG(INFO, VHOST_CONFIG, "(%d) device released\n", ctx.vid);
fuse_reply_err(req, err);
}
/*
* Boilerplate code for CUSE IOCTL
- * Implicit arguments: ctx, req, result.
+ * Implicit arguments: vid, req, result.
*/
#define VHOST_IOCTL(func) do { \
- result = (func)(ctx); \
+ result = (func)(vid); \
fuse_reply_ioctl(req, result, NULL, 0); \
} while (0)
@@ -134,41 +134,41 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
/*
* Boilerplate code for CUSE Read IOCTL
- * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
+ * Implicit arguments: vid, req, result, in_bufsz, in_buf.
*/
#define VHOST_IOCTL_R(type, var, func) do { \
if (!in_bufsz) { \
VHOST_IOCTL_RETRY(sizeof(type), 0);\
} else { \
(var) = *(const type*)in_buf; \
- result = func(ctx, &(var)); \
+ result = func(vid, &(var)); \
fuse_reply_ioctl(req, result, NULL, 0);\
} \
} while (0)
/*
* Boilerplate code for CUSE Write IOCTL
- * Implicit arguments: ctx, req, result, out_bufsz.
+ * Implicit arguments: vid, req, result, out_bufsz.
*/
#define VHOST_IOCTL_W(type, var, func) do { \
if (!out_bufsz) { \
VHOST_IOCTL_RETRY(0, sizeof(type));\
} else { \
- result = (func)(ctx, &(var));\
+ result = (func)(vid, &(var));\
fuse_reply_ioctl(req, result, &(var), sizeof(type));\
} \
} while (0)
/*
* Boilerplate code for CUSE Read/Write IOCTL
- * Implicit arguments: ctx, req, result, in_bufsz, in_buf.
+ * Implicit arguments: vid, req, result, in_bufsz, in_buf.
*/
#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do { \
if (!in_bufsz) { \
VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2));\
} else { \
(var1) = *(const type1*) (in_buf); \
- result = (func)(ctx, (var1), &(var2)); \
+ result = (func)(vid, (var1), &(var2)); \
fuse_reply_ioctl(req, result, &(var2), sizeof(type2));\
} \
} while (0)
@@ -183,18 +183,19 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
struct fuse_file_info *fi, __rte_unused unsigned flags,
const void *in_buf, size_t in_bufsz, size_t out_bufsz)
{
- struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
+ struct vhost_cuse_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
struct vhost_vring_file file;
struct vhost_vring_state state;
struct vhost_vring_addr addr;
uint64_t features;
uint32_t index;
int result = 0;
+ int vid = ctx.vid;
switch (cmd) {
case VHOST_NET_SET_BACKEND:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
+ "(%d) IOCTL: VHOST_NET_SET_BACKEND\n", ctx.vid);
if (!in_buf) {
VHOST_IOCTL_RETRY(sizeof(file), 0);
break;
@@ -206,32 +207,32 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
case VHOST_GET_FEATURES:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
+ "(%d) IOCTL: VHOST_GET_FEATURES\n", vid);
VHOST_IOCTL_W(uint64_t, features, vhost_get_features);
break;
case VHOST_SET_FEATURES:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_FEATURES\n", vid);
VHOST_IOCTL_R(uint64_t, features, vhost_set_features);
break;
case VHOST_RESET_OWNER:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
+ "(%d) IOCTL: VHOST_RESET_OWNER\n", vid);
VHOST_IOCTL(vhost_reset_owner);
break;
case VHOST_SET_OWNER:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_OWNER\n", vid);
VHOST_IOCTL(vhost_set_owner);
break;
case VHOST_SET_MEM_TABLE:
/*TODO fix race condition.*/
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_MEM_TABLE\n", vid);
static struct vhost_memory mem_temp;
switch (in_bufsz) {
@@ -264,28 +265,28 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
case VHOST_SET_VRING_NUM:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_VRING_NUM\n", vid);
VHOST_IOCTL_R(struct vhost_vring_state, state,
vhost_set_vring_num);
break;
case VHOST_SET_VRING_BASE:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_VRING_BASE\n", vid);
VHOST_IOCTL_R(struct vhost_vring_state, state,
vhost_set_vring_base);
break;
case VHOST_GET_VRING_BASE:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
+ "(%d) IOCTL: VHOST_GET_VRING_BASE\n", vid);
VHOST_IOCTL_RW(uint32_t, index,
struct vhost_vring_state, state, vhost_get_vring_base);
break;
case VHOST_SET_VRING_ADDR:
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
+ "(%d) IOCTL: VHOST_SET_VRING_ADDR\n", vid);
VHOST_IOCTL_R(struct vhost_vring_addr, addr,
vhost_set_vring_addr);
break;
@@ -294,12 +295,10 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
case VHOST_SET_VRING_CALL:
if (cmd == VHOST_SET_VRING_KICK)
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n",
- ctx.fh);
+ "(%d) IOCTL: VHOST_SET_VRING_KICK\n", vid);
else
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n",
- ctx.fh);
+ "(%d) IOCTL: VHOST_SET_VRING_CALL\n", vid);
if (!in_buf)
VHOST_IOCTL_RETRY(sizeof(struct vhost_vring_file), 0);
else {
@@ -315,10 +314,10 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
}
file.fd = fd;
if (cmd == VHOST_SET_VRING_KICK) {
- result = vhost_set_vring_kick(ctx, &file);
+ result = vhost_set_vring_kick(vid, &file);
fuse_reply_ioctl(req, result, NULL, 0);
} else {
- result = vhost_set_vring_call(ctx, &file);
+ result = vhost_set_vring_call(vid, &file);
fuse_reply_ioctl(req, result, NULL, 0);
}
}
@@ -326,17 +325,17 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
default:
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
+ "(%d) IOCTL: DOESN NOT EXIST\n", vid);
result = -1;
fuse_reply_ioctl(req, result, NULL, 0);
}
if (result < 0)
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
+ "(%d) IOCTL: FAIL\n", vid);
else
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
+ "(%d) IOCTL: SUCCESS\n", vid);
}
/*
@@ -353,7 +352,7 @@ static const struct cuse_lowlevel_ops vhost_net_ops = {
* vhost_net_device_ops are also passed when the device is registered in app.
*/
int
-rte_vhost_driver_register(const char *dev_name)
+rte_vhost_driver_register(const char *dev_name, uint64_t flags)
{
struct cuse_info cuse_info;
char device_name[PATH_MAX] = "";
@@ -365,6 +364,12 @@ rte_vhost_driver_register(const char *dev_name)
char fuse_opt_nomulti[] = FUSE_OPT_NOMULTI;
char *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};
+ if (flags) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "vhost-cuse does not support any flags so far\n");
+ return -1;
+ }
+
if (access(cuse_device_name, R_OK | W_OK) < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"char device %s can't be accessed, maybe not exist\n",
diff --git a/lib/librte_vhost/vhost_cuse/virtio-net-cdev.c b/lib/librte_vhost/vhost_cuse/virtio-net-cdev.c
index a68a8bd4..552be7d4 100644
--- a/lib/librte_vhost/vhost_cuse/virtio-net-cdev.c
+++ b/lib/librte_vhost/vhost_cuse/virtio-net-cdev.c
@@ -54,7 +54,6 @@
#include "rte_virtio_net.h"
#include "vhost-net.h"
#include "virtio-net-cdev.h"
-#include "virtio-net.h"
#include "eventfd_copy.h"
/* Line size for reading maps file. */
@@ -263,7 +262,7 @@ host_memory_map(pid_t pid, uint64_t addr,
}
int
-cuse_set_mem_table(struct vhost_device_ctx ctx,
+cuse_set_mem_table(struct vhost_cuse_device_ctx ctx,
const struct vhost_memory *mem_regions_addr, uint32_t nregions)
{
uint64_t size = offsetof(struct vhost_memory, regions);
@@ -274,7 +273,7 @@ cuse_set_mem_table(struct vhost_device_ctx ctx,
uint64_t base_address = 0, mapped_address, mapped_size;
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(ctx.vid);
if (dev == NULL)
return -1;
@@ -289,8 +288,8 @@ cuse_set_mem_table(struct vhost_device_ctx ctx,
sizeof(struct virtio_memory_regions) * nregions);
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for dev->mem\n",
- dev->device_fh);
+ "(%d) failed to allocate memory for dev->mem\n",
+ dev->vid);
return -1;
}
@@ -379,7 +378,7 @@ cuse_set_mem_table(struct vhost_device_ctx ctx,
* save it in the device structure.
*/
static int
-get_ifname(struct vhost_device_ctx ctx, struct virtio_net *dev, int tap_fd, int pid)
+get_ifname(int vid, int tap_fd, int pid)
{
int fd_tap;
struct ifreq ifr;
@@ -393,33 +392,32 @@ get_ifname(struct vhost_device_ctx ctx, struct virtio_net *dev, int tap_fd, int
ret = ioctl(fd_tap, TUNGETIFF, &ifr);
if (close(fd_tap) < 0)
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") fd close failed\n",
- dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%d) fd close failed\n", vid);
if (ret >= 0) {
ifr_size = strnlen(ifr.ifr_name, sizeof(ifr.ifr_name));
- vhost_set_ifname(ctx, ifr.ifr_name, ifr_size);
+ vhost_set_ifname(vid, ifr.ifr_name, ifr_size);
} else
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") TUNGETIFF ioctl failed\n",
- dev->device_fh);
+ "(%d) TUNGETIFF ioctl failed\n", vid);
return 0;
}
-int cuse_set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
+int
+cuse_set_backend(struct vhost_cuse_device_ctx ctx,
+ struct vhost_vring_file *file)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(ctx.vid);
if (dev == NULL)
return -1;
if (!(dev->flags & VIRTIO_DEV_RUNNING) && file->fd != VIRTIO_DEV_STOPPED)
- get_ifname(ctx, dev, file->fd, ctx.pid);
+ get_ifname(ctx.vid, file->fd, ctx.pid);
- return vhost_set_backend(ctx, file);
+ return vhost_set_backend(ctx.vid, file);
}
void
diff --git a/lib/librte_vhost/vhost_cuse/virtio-net-cdev.h b/lib/librte_vhost/vhost_cuse/virtio-net-cdev.h
index eb6b0bab..3f67154b 100644
--- a/lib/librte_vhost/vhost_cuse/virtio-net-cdev.h
+++ b/lib/librte_vhost/vhost_cuse/virtio-net-cdev.h
@@ -38,11 +38,19 @@
#include "vhost-net.h"
+/*
+ * Structure used to identify device context.
+ */
+struct vhost_cuse_device_ctx {
+ pid_t pid; /* PID of process calling the IOCTL. */
+ int vid; /* Virtio-net device ID */
+};
+
int
-cuse_set_mem_table(struct vhost_device_ctx ctx,
+cuse_set_mem_table(struct vhost_cuse_device_ctx ctx,
const struct vhost_memory *mem_regions_addr, uint32_t nregions);
int
-cuse_set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *);
+cuse_set_backend(struct vhost_cuse_device_ctx ctx, struct vhost_vring_file *);
#endif
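The new struct draws the layering line for vhost-cuse: the pid stays private to the cuse shim (it is needed to walk the calling process's memory maps when translating guest addresses), while everything below the shim is addressed by vid alone. Restating the pattern as a sketch, with cuse_op as a hypothetical stand-in for the cuse_set_* handlers:

int
cuse_op(struct vhost_cuse_device_ctx ctx, struct vhost_vring_file *file)
{
	/* cuse-specific work may consult ctx.pid here ... */
	return vhost_set_backend(ctx.vid, file);	/* generic layer sees only the vid */
}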
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 750821a4..15ca9562 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -126,10 +126,10 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
}
static inline void
-copy_virtio_net_hdr(struct vhost_virtqueue *vq, uint64_t desc_addr,
+copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
struct virtio_net_hdr_mrg_rxbuf hdr)
{
- if (vq->vhost_hlen == sizeof(struct virtio_net_hdr_mrg_rxbuf))
+ if (dev->vhost_hlen == sizeof(struct virtio_net_hdr_mrg_rxbuf))
*(struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)desc_addr = hdr;
else
*(struct virtio_net_hdr *)(uintptr_t)desc_addr = hdr.hdr;
@@ -137,7 +137,7 @@ copy_virtio_net_hdr(struct vhost_virtqueue *vq, uint64_t desc_addr,
static inline int __attribute__((always_inline))
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mbuf *m, uint16_t desc_idx, uint32_t *copied)
+ struct rte_mbuf *m, uint16_t desc_idx)
{
uint32_t desc_avail, desc_offset;
uint32_t mbuf_avail, mbuf_offset;
@@ -147,21 +147,20 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
desc = &vq->desc[desc_idx];
- if (unlikely(desc->len < vq->vhost_hlen))
+ if (unlikely(desc->len < dev->vhost_hlen))
return -1;
desc_addr = gpa_to_vva(dev, desc->addr);
rte_prefetch0((void *)(uintptr_t)desc_addr);
virtio_enqueue_offload(m, &virtio_hdr.hdr);
- copy_virtio_net_hdr(vq, desc_addr, virtio_hdr);
- vhost_log_write(dev, desc->addr, vq->vhost_hlen);
- PRINT_PACKET(dev, (uintptr_t)desc_addr, vq->vhost_hlen, 0);
+ copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
+ vhost_log_write(dev, desc->addr, dev->vhost_hlen);
+ PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
- desc_offset = vq->vhost_hlen;
- desc_avail = desc->len - vq->vhost_hlen;
+ desc_offset = dev->vhost_hlen;
+ desc_avail = desc->len - dev->vhost_hlen;
- *copied = rte_pktmbuf_pkt_len(m);
mbuf_avail = rte_pktmbuf_data_len(m);
mbuf_offset = 0;
while (mbuf_avail != 0 || m->next != NULL) {
@@ -205,49 +204,6 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}
-/*
- * As many data cores may want to access available buffers
- * they need to be reserved.
- */
-static inline uint32_t
-reserve_avail_buf(struct vhost_virtqueue *vq, uint32_t count,
- uint16_t *start, uint16_t *end)
-{
- uint16_t res_start_idx;
- uint16_t res_end_idx;
- uint16_t avail_idx;
- uint16_t free_entries;
- int success;
-
- count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
-
-again:
- res_start_idx = vq->last_used_idx_res;
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- free_entries = avail_idx - res_start_idx;
- count = RTE_MIN(count, free_entries);
- if (count == 0)
- return 0;
-
- res_end_idx = res_start_idx + count;
-
- /*
- * update vq->last_used_idx_res atomically; try again if failed.
- *
- * TODO: Allow to disable cmpset if no concurrency in application.
- */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res,
- res_start_idx, res_end_idx);
- if (unlikely(!success))
- goto again;
-
- *start = res_start_idx;
- *end = res_end_idx;
-
- return count;
-}
-
/**
* This function adds buffers to the virtio devices RX virtqueue. Buffers can
* be received from the physical port or from another virtio device. A packet
@@ -260,15 +216,15 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
- uint16_t res_start_idx, res_end_idx;
+ uint16_t avail_idx, free_entries, start_idx;
uint16_t desc_indexes[MAX_PKT_BURST];
+ uint16_t used_idx;
uint32_t i;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
- RTE_LOG(ERR, VHOST_DATA,
- "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
- __func__, dev->device_fh, queue_id);
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
return 0;
}
@@ -276,38 +232,43 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
if (unlikely(vq->enabled == 0))
return 0;
- count = reserve_avail_buf(vq, count, &res_start_idx, &res_end_idx);
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ start_idx = vq->last_used_idx;
+ free_entries = avail_idx - start_idx;
+ count = RTE_MIN(count, free_entries);
+ count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
if (count == 0)
return 0;
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") res_start_idx %d| res_end_idx Index %d\n",
- dev->device_fh, res_start_idx, res_end_idx);
+ LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
+ dev->vid, start_idx, start_idx + count);
/* Retrieve all of the desc indexes first to avoid caching issues. */
- rte_prefetch0(&vq->avail->ring[res_start_idx & (vq->size - 1)]);
+ rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]);
for (i = 0; i < count; i++) {
- desc_indexes[i] = vq->avail->ring[(res_start_idx + i) &
- (vq->size - 1)];
+ used_idx = (start_idx + i) & (vq->size - 1);
+ desc_indexes[i] = vq->avail->ring[used_idx];
+ vq->used->ring[used_idx].id = desc_indexes[i];
+ vq->used->ring[used_idx].len = pkts[i]->pkt_len +
+ dev->vhost_hlen;
+ vhost_log_used_vring(dev, vq,
+ offsetof(struct vring_used, ring[used_idx]),
+ sizeof(vq->used->ring[used_idx]));
}
rte_prefetch0(&vq->desc[desc_indexes[0]]);
for (i = 0; i < count; i++) {
uint16_t desc_idx = desc_indexes[i];
- uint16_t used_idx = (res_start_idx + i) & (vq->size - 1);
- uint32_t copied;
int err;
- err = copy_mbuf_to_desc(dev, vq, pkts[i], desc_idx, &copied);
-
- vq->used->ring[used_idx].id = desc_idx;
- if (unlikely(err))
- vq->used->ring[used_idx].len = vq->vhost_hlen;
- else
- vq->used->ring[used_idx].len = copied + vq->vhost_hlen;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[used_idx]),
- sizeof(vq->used->ring[used_idx]));
+ err = copy_mbuf_to_desc(dev, vq, pkts[i], desc_idx);
+ if (unlikely(err)) {
+ used_idx = (start_idx + i) & (vq->size - 1);
+ vq->used->ring[used_idx].len = dev->vhost_hlen;
+ vhost_log_used_vring(dev, vq,
+ offsetof(struct vring_used, ring[used_idx]),
+ sizeof(vq->used->ring[used_idx]));
+ }
if (i + 1 < count)
rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
@@ -315,12 +276,8 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
rte_smp_wmb();
- /* Wait until it's our turn to add our buffer to the used ring. */
- while (unlikely(vq->last_used_idx != res_start_idx))
- rte_pause();
-
*(volatile uint16_t *)&vq->used->idx += count;
- vq->last_used_idx = res_end_idx;
+ vq->last_used_idx += count;
vhost_log_used_vring(dev, vq,
offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
@@ -337,7 +294,8 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
static inline int
fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
- uint32_t *allocated, uint32_t *vec_idx)
+ uint32_t *allocated, uint32_t *vec_idx,
+ struct buf_vector *buf_vec)
{
uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
uint32_t vec_id = *vec_idx;
@@ -348,9 +306,9 @@ fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
return -1;
len += vq->desc[idx].len;
- vq->buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
- vq->buf_vec[vec_id].buf_len = vq->desc[idx].len;
- vq->buf_vec[vec_id].desc_idx = idx;
+ buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
+ buf_vec[vec_id].buf_len = vq->desc[idx].len;
+ buf_vec[vec_id].desc_idx = idx;
vec_id++;
if ((vq->desc[idx].flags & VRING_DESC_F_NEXT) == 0)
@@ -366,39 +324,30 @@ fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
}
/*
- * As many data cores may want to access available buffers concurrently,
- * they need to be reserved.
- *
* Returns -1 on fail, 0 on success
*/
static inline int
reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
- uint16_t *start, uint16_t *end)
+ uint16_t *end, struct buf_vector *buf_vec)
{
- uint16_t res_start_idx;
- uint16_t res_cur_idx;
+ uint16_t cur_idx;
uint16_t avail_idx;
- uint32_t allocated;
- uint32_t vec_idx;
- uint16_t tries;
+ uint32_t allocated = 0;
+ uint32_t vec_idx = 0;
+ uint16_t tries = 0;
-again:
- res_start_idx = vq->last_used_idx_res;
- res_cur_idx = res_start_idx;
+ cur_idx = vq->last_used_idx;
- allocated = 0;
- vec_idx = 0;
- tries = 0;
while (1) {
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- if (unlikely(res_cur_idx == avail_idx))
+ if (unlikely(cur_idx == avail_idx))
return -1;
- if (unlikely(fill_vec_buf(vq, res_cur_idx, &allocated,
- &vec_idx) < 0))
+ if (unlikely(fill_vec_buf(vq, cur_idx, &allocated,
+ &vec_idx, buf_vec) < 0))
return -1;
- res_cur_idx++;
+ cur_idx++;
tries++;
if (allocated >= size)
@@ -413,27 +362,19 @@ again:
return -1;
}
- /*
- * update vq->last_used_idx_res atomically.
- * retry again if failed.
- */
- if (rte_atomic16_cmpset(&vq->last_used_idx_res,
- res_start_idx, res_cur_idx) == 0)
- goto again;
-
- *start = res_start_idx;
- *end = res_cur_idx;
+ *end = cur_idx;
return 0;
}
static inline uint32_t __attribute__((always_inline))
copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint16_t res_start_idx, uint16_t res_end_idx,
- struct rte_mbuf *m)
+ uint16_t end_idx, struct rte_mbuf *m,
+ struct buf_vector *buf_vec)
{
struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
uint32_t vec_idx = 0;
- uint16_t cur_idx = res_start_idx;
+ uint16_t start_idx = vq->last_used_idx;
+ uint16_t cur_idx = start_idx;
uint64_t desc_addr;
uint32_t mbuf_offset, mbuf_avail;
uint32_t desc_offset, desc_avail;
@@ -443,34 +384,33 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (unlikely(m == NULL))
return 0;
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") Current Index %d| End Index %d\n",
- dev->device_fh, cur_idx, res_end_idx);
+ LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+ dev->vid, cur_idx, end_idx);
- if (vq->buf_vec[vec_idx].buf_len < vq->vhost_hlen)
+ if (buf_vec[vec_idx].buf_len < dev->vhost_hlen)
return -1;
- desc_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+ desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
rte_prefetch0((void *)(uintptr_t)desc_addr);
- virtio_hdr.num_buffers = res_end_idx - res_start_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") RX: Num merge buffers %d\n",
- dev->device_fh, virtio_hdr.num_buffers);
+ virtio_hdr.num_buffers = end_idx - start_idx;
+ LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
+ dev->vid, virtio_hdr.num_buffers);
virtio_enqueue_offload(m, &virtio_hdr.hdr);
- copy_virtio_net_hdr(vq, desc_addr, virtio_hdr);
- vhost_log_write(dev, vq->buf_vec[vec_idx].buf_addr, vq->vhost_hlen);
- PRINT_PACKET(dev, (uintptr_t)desc_addr, vq->vhost_hlen, 0);
+ copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
+ vhost_log_write(dev, buf_vec[vec_idx].buf_addr, dev->vhost_hlen);
+ PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
- desc_avail = vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
- desc_offset = vq->vhost_hlen;
+ desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
+ desc_offset = dev->vhost_hlen;
mbuf_avail = rte_pktmbuf_data_len(m);
mbuf_offset = 0;
while (mbuf_avail != 0 || m->next != NULL) {
/* done with current desc buf, get the next one */
if (desc_avail == 0) {
- desc_idx = vq->buf_vec[vec_idx].desc_idx;
+ desc_idx = buf_vec[vec_idx].desc_idx;
if (!(vq->desc[desc_idx].flags & VRING_DESC_F_NEXT)) {
/* Update used ring with desc information */
@@ -484,12 +424,12 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
vec_idx++;
- desc_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+ desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
/* Prefetch buffer address. */
rte_prefetch0((void *)(uintptr_t)desc_addr);
desc_offset = 0;
- desc_avail = vq->buf_vec[vec_idx].buf_len;
+ desc_avail = buf_vec[vec_idx].buf_len;
}
/* done with current mbuf, get the next one */
@@ -504,7 +444,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
- vhost_log_write(dev, vq->buf_vec[vec_idx].buf_addr + desc_offset,
+ vhost_log_write(dev, buf_vec[vec_idx].buf_addr + desc_offset,
cpy_len);
PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
cpy_len, 0);
@@ -516,13 +456,13 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
used_idx = cur_idx & (vq->size - 1);
- vq->used->ring[used_idx].id = vq->buf_vec[vec_idx].desc_idx;
+ vq->used->ring[used_idx].id = buf_vec[vec_idx].desc_idx;
vq->used->ring[used_idx].len = desc_offset;
vhost_log_used_vring(dev, vq,
offsetof(struct vring_used, ring[used_idx]),
sizeof(vq->used->ring[used_idx]));
- return res_end_idx - res_start_idx;
+ return end_idx - start_idx;
}
static inline uint32_t __attribute__((always_inline))
@@ -531,14 +471,13 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
{
struct vhost_virtqueue *vq;
uint32_t pkt_idx = 0, nr_used = 0;
- uint16_t start, end;
+ uint16_t end;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
- dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
- RTE_LOG(ERR, VHOST_DATA,
- "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
- __func__, dev->device_fh, queue_id);
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
return 0;
}
@@ -551,27 +490,20 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
return 0;
for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
- uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
if (unlikely(reserve_avail_buf_mergeable(vq, pkt_len,
- &start, &end) < 0)) {
+ &end, buf_vec) < 0)) {
LOG_DEBUG(VHOST_DATA,
- "(%" PRIu64 ") Failed to get enough desc from vring\n",
- dev->device_fh);
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
break;
}
- nr_used = copy_mbuf_to_desc_mergeable(dev, vq, start, end,
- pkts[pkt_idx]);
+ nr_used = copy_mbuf_to_desc_mergeable(dev, vq, end,
+ pkts[pkt_idx], buf_vec);
rte_smp_wmb();
- /*
- * Wait until it's our turn to add our buffer
- * to the used ring.
- */
- while (unlikely(vq->last_used_idx != start))
- rte_pause();
-
*(volatile uint16_t *)&vq->used->idx += nr_used;
vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
@@ -592,9 +524,14 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
}
uint16_t
-rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return 0;
+
if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
return virtio_dev_merge_rx(dev, queue_id, pkts, count);
else
@@ -747,22 +684,53 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t nr_desc = 1;
desc = &vq->desc[desc_idx];
- if (unlikely(desc->len < vq->vhost_hlen))
+ if (unlikely(desc->len < dev->vhost_hlen))
return -1;
desc_addr = gpa_to_vva(dev, desc->addr);
- rte_prefetch0((void *)(uintptr_t)desc_addr);
-
- /* Retrieve virtio net header */
hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
- desc_avail = desc->len - vq->vhost_hlen;
- desc_offset = vq->vhost_hlen;
+ rte_prefetch0(hdr);
+
+ /*
+ * A virtio driver normally uses at least 2 desc buffers
+ * for Tx: the first for storing the header, and others
+ * for storing the data.
+ */
+ if (likely((desc->len == dev->vhost_hlen) &&
+ (desc->flags & VRING_DESC_F_NEXT) != 0)) {
+ desc = &vq->desc[desc->next];
+
+ desc_addr = gpa_to_vva(dev, desc->addr);
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ nr_desc += 1;
+
+ PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
+ } else {
+ desc_avail = desc->len - dev->vhost_hlen;
+ desc_offset = dev->vhost_hlen;
+ }
mbuf_offset = 0;
mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
- while (desc_avail != 0 || (desc->flags & VRING_DESC_F_NEXT) != 0) {
+ while (1) {
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset),
+ (void *)((uintptr_t)(desc_addr + desc_offset)),
+ cpy_len);
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+
/* This desc reaches to its end, get the next one */
if (desc_avail == 0) {
+ if ((desc->flags & VRING_DESC_F_NEXT) == 0)
+ break;
+
if (unlikely(desc->next >= vq->size ||
++nr_desc >= vq->size))
return -1;
@@ -798,16 +766,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
mbuf_offset = 0;
mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
}
-
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
- rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset),
- (void *)((uintptr_t)(desc_addr + desc_offset)),
- cpy_len);
-
- mbuf_avail -= cpy_len;
- mbuf_offset += cpy_len;
- desc_avail -= cpy_len;
- desc_offset += cpy_len;
}
prev->data_len = mbuf_offset;
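The restructured loop above leans on the comment in the first hunk: a guest normally dedicates the first descriptor to the virtio-net header alone. Isolating that branch (names as in the patch) makes the fast path easier to see:

/* header-only first descriptor: jump straight to the data descriptor */
if (desc->len == dev->vhost_hlen && (desc->flags & VRING_DESC_F_NEXT) != 0) {
	desc = &vq->desc[desc->next];
	desc_offset = 0;
	desc_avail  = desc->len;
} else {
	/* header and data share the first descriptor */
	desc_offset = dev->vhost_hlen;
	desc_avail  = desc->len - dev->vhost_hlen;
}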
@@ -820,9 +778,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
uint16_t
-rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
+ struct virtio_net *dev;
struct rte_mbuf *rarp_mbuf = NULL;
struct vhost_virtqueue *vq;
uint32_t desc_indexes[MAX_PKT_BURST];
@@ -831,10 +790,13 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
uint16_t free_entries;
uint16_t avail_idx;
+ dev = get_device(vid);
+ if (!dev)
+ return 0;
+
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
- RTE_LOG(ERR, VHOST_DATA,
- "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
- __func__, dev->device_fh, queue_id);
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
return 0;
}
@@ -870,35 +832,37 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
if (free_entries == 0)
goto out;
- LOG_DEBUG(VHOST_DATA, "%s (%"PRIu64")\n", __func__, dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
/* Prefetch available ring to retrieve head indexes. */
used_idx = vq->last_used_idx & (vq->size - 1);
rte_prefetch0(&vq->avail->ring[used_idx]);
+ rte_prefetch0(&vq->used->ring[used_idx]);
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, free_entries);
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") about to dequeue %u buffers\n",
- dev->device_fh, count);
+ LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
+ dev->vid, count);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < count; i++) {
- desc_indexes[i] = vq->avail->ring[(vq->last_used_idx + i) &
- (vq->size - 1)];
+ used_idx = (vq->last_used_idx + i) & (vq->size - 1);
+ desc_indexes[i] = vq->avail->ring[used_idx];
+
+ vq->used->ring[used_idx].id = desc_indexes[i];
+ vq->used->ring[used_idx].len = 0;
+ vhost_log_used_vring(dev, vq,
+ offsetof(struct vring_used, ring[used_idx]),
+ sizeof(vq->used->ring[used_idx]));
}
/* Prefetch descriptor index. */
rte_prefetch0(&vq->desc[desc_indexes[0]]);
- rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);
-
for (i = 0; i < count; i++) {
int err;
- if (likely(i + 1 < count)) {
+ if (likely(i + 1 < count))
rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
- rte_prefetch0(&vq->used->ring[(used_idx + 1) &
- (vq->size - 1)]);
- }
pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(pkts[i] == NULL)) {
@@ -912,18 +876,12 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
rte_pktmbuf_free(pkts[i]);
break;
}
-
- used_idx = vq->last_used_idx++ & (vq->size - 1);
- vq->used->ring[used_idx].id = desc_indexes[i];
- vq->used->ring[used_idx].len = 0;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[used_idx]),
- sizeof(vq->used->ring[used_idx]));
}
rte_smp_wmb();
rte_smp_rmb();
vq->used->idx += i;
+ vq->last_used_idx += i;
vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
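Both enqueue paths in this file shed the cmpset-based reservation of last_used_idx_res; serializing concurrent enqueues to one virtqueue thereby moves to the application. What remains is a plain single-writer window computation, sketched here as a hypothetical helper with names following the patch:

static inline uint32_t
avail_window(struct vhost_virtqueue *vq, uint32_t count)
{
	uint16_t avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	uint16_t free_entries = avail_idx - vq->last_used_idx;	/* wraps mod 2^16 */

	count = RTE_MIN(count, (uint32_t)free_entries);
	return RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
}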
diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.c b/lib/librte_vhost/vhost_user/vhost-net-user.c
index df2bd648..94f1b923 100644
--- a/lib/librte_vhost/vhost_user/vhost-net-user.c
+++ b/lib/librte_vhost/vhost_user/vhost-net-user.c
@@ -33,6 +33,7 @@
#include <stdint.h>
#include <stdio.h>
+#include <stdbool.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
@@ -40,6 +41,7 @@
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
+#include <sys/queue.h>
#include <errno.h>
#include <pthread.h>
@@ -51,32 +53,44 @@
#include "vhost-net.h"
#include "virtio-net-user.h"
-#define MAX_VIRTIO_BACKLOG 128
-
-static void vserver_new_vq_conn(int fd, void *data, int *remove);
-static void vserver_message_handler(int fd, void *dat, int *remove);
+/*
+ * Every time rte_vhost_driver_register() is invoked, an associated
+ * vhost_user_socket struct will be created.
+ */
+struct vhost_user_socket {
+ char *path;
+ int listenfd;
+ bool is_server;
+ bool reconnect;
+};
-struct connfd_ctx {
- struct vhost_server *vserver;
- uint32_t fh;
+struct vhost_user_connection {
+ struct vhost_user_socket *vsocket;
+ int vid;
};
-#define MAX_VHOST_SERVER 1024
-struct _vhost_server {
- struct vhost_server *server[MAX_VHOST_SERVER];
+#define MAX_VHOST_SOCKET 1024
+struct vhost_user {
+ struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
struct fdset fdset;
- int vserver_cnt;
- pthread_mutex_t server_mutex;
+ int vsocket_cnt;
+ pthread_mutex_t mutex;
};
-static struct _vhost_server g_vhost_server = {
+#define MAX_VIRTIO_BACKLOG 128
+
+static void vhost_user_server_new_connection(int fd, void *data, int *remove);
+static void vhost_user_msg_handler(int fd, void *dat, int *remove);
+static int vhost_user_create_client(struct vhost_user_socket *vsocket);
+
+static struct vhost_user vhost_user = {
.fdset = {
.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
.num = 0
},
- .vserver_cnt = 0,
- .server_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .vsocket_cnt = 0,
+ .mutex = PTHREAD_MUTEX_INITIALIZER,
};
static const char *vhost_message_str[VHOST_USER_MAX] = {
@@ -102,48 +116,6 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
};
-/**
- * Create a unix domain socket, bind to path and listen for connection.
- * @return
- * socket fd or -1 on failure
- */
-static int
-uds_socket(const char *path)
-{
- struct sockaddr_un un;
- int sockfd;
- int ret;
-
- if (path == NULL)
- return -1;
-
- sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
- if (sockfd < 0)
- return -1;
- RTE_LOG(INFO, VHOST_CONFIG, "socket created, fd:%d\n", sockfd);
-
- memset(&un, 0, sizeof(un));
- un.sun_family = AF_UNIX;
- snprintf(un.sun_path, sizeof(un.sun_path), "%s", path);
- ret = bind(sockfd, (struct sockaddr *)&un, sizeof(un));
- if (ret == -1) {
- RTE_LOG(ERR, VHOST_CONFIG, "fail to bind fd:%d, remove file:%s and try again.\n",
- sockfd, path);
- goto err;
- }
- RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path);
-
- ret = listen(sockfd, MAX_VIRTIO_BACKLOG);
- if (ret == -1)
- goto err;
-
- return sockfd;
-
-err:
- close(sockfd);
- return -1;
-}
-
/* return bytes# of read on success or negative val on failure. */
static int
read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
@@ -278,62 +250,66 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg)
return ret;
}
-/* call back when there is new virtio connection. */
+
static void
-vserver_new_vq_conn(int fd, void *dat, __rte_unused int *remove)
+vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
{
- struct vhost_server *vserver = (struct vhost_server *)dat;
- int conn_fd;
- struct connfd_ctx *ctx;
- int fh;
- struct vhost_device_ctx vdev_ctx = { (pid_t)0, 0 };
- unsigned int size;
-
- conn_fd = accept(fd, NULL, NULL);
- RTE_LOG(INFO, VHOST_CONFIG,
- "new virtio connection is %d\n", conn_fd);
- if (conn_fd < 0)
- return;
+ int vid;
+ size_t size;
+ struct vhost_user_connection *conn;
- ctx = calloc(1, sizeof(*ctx));
- if (ctx == NULL) {
- close(conn_fd);
+ conn = malloc(sizeof(*conn));
+ if (conn == NULL) {
+ close(fd);
return;
}
- fh = vhost_new_device(vdev_ctx);
- if (fh == -1) {
- free(ctx);
- close(conn_fd);
+ vid = vhost_new_device();
+ if (vid == -1) {
+ close(fd);
+ free(conn);
return;
}
- vdev_ctx.fh = fh;
- size = strnlen(vserver->path, PATH_MAX);
- vhost_set_ifname(vdev_ctx, vserver->path,
- size);
+ size = strnlen(vsocket->path, PATH_MAX);
+ vhost_set_ifname(vid, vsocket->path, size);
+
+ RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
- RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", fh);
+ conn->vsocket = vsocket;
+ conn->vid = vid;
+ fdset_add(&vhost_user.fdset, fd, vhost_user_msg_handler, NULL, conn);
+}
+
+/* call back when there is new vhost-user connection from client */
+static void
+vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
+{
+ struct vhost_user_socket *vsocket = dat;
- ctx->vserver = vserver;
- ctx->fh = fh;
- fdset_add(&g_vhost_server.fdset,
- conn_fd, vserver_message_handler, NULL, ctx);
+ fd = accept(fd, NULL, NULL);
+ if (fd < 0)
+ return;
+
+ RTE_LOG(INFO, VHOST_CONFIG, "new vhost user connection is %d\n", fd);
+ vhost_user_add_connection(fd, vsocket);
}
/* callback when there is message on the connfd */
static void
-vserver_message_handler(int connfd, void *dat, int *remove)
+vhost_user_msg_handler(int connfd, void *dat, int *remove)
{
- struct vhost_device_ctx ctx;
- struct connfd_ctx *cfd_ctx = (struct connfd_ctx *)dat;
+ int vid;
+ struct vhost_user_connection *conn = dat;
struct VhostUserMsg msg;
uint64_t features;
int ret;
- ctx.fh = cfd_ctx->fh;
+ vid = conn->vid;
ret = read_vhost_message(connfd, &msg);
if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
+ struct vhost_user_socket *vsocket = conn->vsocket;
+
if (ret < 0)
RTE_LOG(ERR, VHOST_CONFIG,
"vhost read message failed\n");
@@ -346,8 +322,11 @@ vserver_message_handler(int connfd, void *dat, int *remove)
close(connfd);
*remove = 1;
- free(cfd_ctx);
- vhost_destroy_device(ctx);
+ free(conn);
+ vhost_destroy_device(vid);
+
+ if (vsocket->reconnect)
+ vhost_user_create_client(vsocket);
return;
}
@@ -356,14 +335,14 @@ vserver_message_handler(int connfd, void *dat, int *remove)
vhost_message_str[msg.request]);
switch (msg.request) {
case VHOST_USER_GET_FEATURES:
- ret = vhost_get_features(ctx, &features);
+ ret = vhost_get_features(vid, &features);
msg.payload.u64 = features;
msg.size = sizeof(msg.payload.u64);
send_vhost_message(connfd, &msg);
break;
case VHOST_USER_SET_FEATURES:
features = msg.payload.u64;
- vhost_set_features(ctx, &features);
+ vhost_set_features(vid, &features);
break;
case VHOST_USER_GET_PROTOCOL_FEATURES:
@@ -372,22 +351,22 @@ vserver_message_handler(int connfd, void *dat, int *remove)
send_vhost_message(connfd, &msg);
break;
case VHOST_USER_SET_PROTOCOL_FEATURES:
- user_set_protocol_features(ctx, msg.payload.u64);
+ user_set_protocol_features(vid, msg.payload.u64);
break;
case VHOST_USER_SET_OWNER:
- vhost_set_owner(ctx);
+ vhost_set_owner(vid);
break;
case VHOST_USER_RESET_OWNER:
- vhost_reset_owner(ctx);
+ vhost_reset_owner(vid);
break;
case VHOST_USER_SET_MEM_TABLE:
- user_set_mem_table(ctx, &msg);
+ user_set_mem_table(vid, &msg);
break;
case VHOST_USER_SET_LOG_BASE:
- user_set_log_base(ctx, &msg);
+ user_set_log_base(vid, &msg);
/* it needs a reply */
msg.size = sizeof(msg.payload.u64);
@@ -399,26 +378,26 @@ vserver_message_handler(int connfd, void *dat, int *remove)
break;
case VHOST_USER_SET_VRING_NUM:
- vhost_set_vring_num(ctx, &msg.payload.state);
+ vhost_set_vring_num(vid, &msg.payload.state);
break;
case VHOST_USER_SET_VRING_ADDR:
- vhost_set_vring_addr(ctx, &msg.payload.addr);
+ vhost_set_vring_addr(vid, &msg.payload.addr);
break;
case VHOST_USER_SET_VRING_BASE:
- vhost_set_vring_base(ctx, &msg.payload.state);
+ vhost_set_vring_base(vid, &msg.payload.state);
break;
case VHOST_USER_GET_VRING_BASE:
- ret = user_get_vring_base(ctx, &msg.payload.state);
+ ret = user_get_vring_base(vid, &msg.payload.state);
msg.size = sizeof(msg.payload.state);
send_vhost_message(connfd, &msg);
break;
case VHOST_USER_SET_VRING_KICK:
- user_set_vring_kick(ctx, &msg);
+ user_set_vring_kick(vid, &msg);
break;
case VHOST_USER_SET_VRING_CALL:
- user_set_vring_call(ctx, &msg);
+ user_set_vring_call(vid, &msg);
break;
case VHOST_USER_SET_VRING_ERR:
@@ -434,10 +413,10 @@ vserver_message_handler(int connfd, void *dat, int *remove)
break;
case VHOST_USER_SET_VRING_ENABLE:
- user_set_vring_enable(ctx, &msg.payload.state);
+ user_set_vring_enable(vid, &msg.payload.state);
break;
case VHOST_USER_SEND_RARP:
- user_send_rarp(ctx, &msg);
+ user_send_rarp(vid, &msg);
break;
default:
@@ -446,50 +425,222 @@ vserver_message_handler(int connfd, void *dat, int *remove)
}
}
-/**
- * Creates and initialise the vhost server.
- */
-int
-rte_vhost_driver_register(const char *path)
+static int
+create_unix_socket(const char *path, struct sockaddr_un *un, bool is_server)
{
- struct vhost_server *vserver;
+ int fd;
- pthread_mutex_lock(&g_vhost_server.server_mutex);
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0)
+ return -1;
+ RTE_LOG(INFO, VHOST_CONFIG, "vhost-user %s: socket created, fd: %d\n",
+ is_server ? "server" : "client", fd);
- if (g_vhost_server.vserver_cnt == MAX_VHOST_SERVER) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "error: the number of servers reaches maximum\n");
- pthread_mutex_unlock(&g_vhost_server.server_mutex);
+ memset(un, 0, sizeof(*un));
+ un->sun_family = AF_UNIX;
+ strncpy(un->sun_path, path, sizeof(un->sun_path));
+
+ return fd;
+}
+
+static int
+vhost_user_create_server(struct vhost_user_socket *vsocket)
+{
+ int fd;
+ int ret;
+ struct sockaddr_un un;
+ const char *path = vsocket->path;
+
+ fd = create_unix_socket(path, &un, vsocket->is_server);
+ if (fd < 0)
return -1;
+
+ ret = bind(fd, (struct sockaddr *)&un, sizeof(un));
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to bind to %s: %s; remove it and try again\n",
+ path, strerror(errno));
+ goto err;
}
+ RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path);
- vserver = calloc(sizeof(struct vhost_server), 1);
- if (vserver == NULL) {
- pthread_mutex_unlock(&g_vhost_server.server_mutex);
- return -1;
+ ret = listen(fd, MAX_VIRTIO_BACKLOG);
+ if (ret < 0)
+ goto err;
+
+ vsocket->listenfd = fd;
+ fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
+ NULL, vsocket);
+
+ return 0;
+
+err:
+ close(fd);
+ return -1;
+}
+
+struct vhost_user_reconnect {
+ struct sockaddr_un un;
+ int fd;
+ struct vhost_user_socket *vsocket;
+
+ TAILQ_ENTRY(vhost_user_reconnect) next;
+};
+
+TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
+struct vhost_user_reconnect_list {
+ struct vhost_user_reconnect_tailq_list head;
+ pthread_mutex_t mutex;
+};
+
+static struct vhost_user_reconnect_list reconn_list;
+static pthread_t reconn_tid;
+
+static void *
+vhost_user_client_reconnect(void *arg __rte_unused)
+{
+ struct vhost_user_reconnect *reconn, *next;
+
+ while (1) {
+ pthread_mutex_lock(&reconn_list.mutex);
+
+ /*
+ * An equivalent of TAILQ_FOREACH_SAFE,
+ * which does not exist on all platforms.
+ */
+ for (reconn = TAILQ_FIRST(&reconn_list.head);
+ reconn != NULL; reconn = next) {
+ next = TAILQ_NEXT(reconn, next);
+
+ if (connect(reconn->fd, (struct sockaddr *)&reconn->un,
+ sizeof(reconn->un)) < 0)
+ continue;
+
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "%s: connected\n", reconn->vsocket->path);
+ vhost_user_add_connection(reconn->fd, reconn->vsocket);
+ TAILQ_REMOVE(&reconn_list.head, reconn, next);
+ free(reconn);
+ }
+
+ pthread_mutex_unlock(&reconn_list.mutex);
+ sleep(1);
}
- vserver->listenfd = uds_socket(path);
- if (vserver->listenfd < 0) {
- free(vserver);
- pthread_mutex_unlock(&g_vhost_server.server_mutex);
+ return NULL;
+}
+
+static int
+vhost_user_reconnect_init(void)
+{
+ int ret;
+
+ pthread_mutex_init(&reconn_list.mutex, NULL);
+ TAILQ_INIT(&reconn_list.head);
+
+ ret = pthread_create(&reconn_tid, NULL,
+ vhost_user_client_reconnect, NULL);
+ if (ret < 0)
+ RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
+
+ return ret;
+}
+
+static int
+vhost_user_create_client(struct vhost_user_socket *vsocket)
+{
+ int fd;
+ int ret;
+ struct sockaddr_un un;
+ const char *path = vsocket->path;
+ struct vhost_user_reconnect *reconn;
+
+ fd = create_unix_socket(path, &un, vsocket->is_server);
+ if (fd < 0)
return -1;
+
+ ret = connect(fd, (struct sockaddr *)&un, sizeof(un));
+ if (ret == 0) {
+ vhost_user_add_connection(fd, vsocket);
+ return 0;
}
- vserver->path = strdup(path);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to connect to %s: %s\n",
+ path, strerror(errno));
- fdset_add(&g_vhost_server.fdset, vserver->listenfd,
- vserver_new_vq_conn, NULL, vserver);
+ if (!vsocket->reconnect) {
+ close(fd);
+ return -1;
+ }
- g_vhost_server.server[g_vhost_server.vserver_cnt++] = vserver;
- pthread_mutex_unlock(&g_vhost_server.server_mutex);
+ RTE_LOG(ERR, VHOST_CONFIG, "%s: reconnecting...\n", path);
+ reconn = malloc(sizeof(*reconn));
+ reconn->un = un;
+ reconn->fd = fd;
+ reconn->vsocket = vsocket;
+ pthread_mutex_lock(&reconn_list.mutex);
+ TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
+ pthread_mutex_unlock(&reconn_list.mutex);
return 0;
}
+/*
+ * Register a new vhost-user socket; here we act as server
+ * (the default case) or, when the RTE_VHOST_USER_CLIENT flag
+ * is set, as client.
+ */
+int
+rte_vhost_driver_register(const char *path, uint64_t flags)
+{
+ int ret = -1;
+ struct vhost_user_socket *vsocket;
+
+ if (!path)
+ return -1;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+
+ if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "error: the number of vhost sockets reaches maximum\n");
+ goto out;
+ }
+
+ vsocket = malloc(sizeof(struct vhost_user_socket));
+ if (!vsocket)
+ goto out;
+ memset(vsocket, 0, sizeof(struct vhost_user_socket));
+ vsocket->path = strdup(path);
+
+ if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
+ vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
+ if (vsocket->reconnect && reconn_tid == 0) {
+ if (vhost_user_reconnect_init() < 0)
+ goto out;
+ }
+ ret = vhost_user_create_client(vsocket);
+ } else {
+ vsocket->is_server = true;
+ ret = vhost_user_create_server(vsocket);
+ }
+ if (ret < 0) {
+ free(vsocket->path);
+ free(vsocket);
+ goto out;
+ }
+
+ vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
+
+out:
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return ret;
+}
/**
- * Unregister the specified vhost server
+ * Unregister the specified vhost socket
*/
int
rte_vhost_driver_unregister(const char *path)
@@ -497,28 +648,29 @@ rte_vhost_driver_unregister(const char *path)
int i;
int count;
- pthread_mutex_lock(&g_vhost_server.server_mutex);
-
- for (i = 0; i < g_vhost_server.vserver_cnt; i++) {
- if (!strcmp(g_vhost_server.server[i]->path, path)) {
- fdset_del(&g_vhost_server.fdset,
- g_vhost_server.server[i]->listenfd);
+ pthread_mutex_lock(&vhost_user.mutex);
- close(g_vhost_server.server[i]->listenfd);
- free(g_vhost_server.server[i]->path);
- free(g_vhost_server.server[i]);
+ for (i = 0; i < vhost_user.vsocket_cnt; i++) {
+ if (!strcmp(vhost_user.vsockets[i]->path, path)) {
+ if (vhost_user.vsockets[i]->is_server) {
+ fdset_del(&vhost_user.fdset,
+ vhost_user.vsockets[i]->listenfd);
+ close(vhost_user.vsockets[i]->listenfd);
+ unlink(path);
+ }
- unlink(path);
+ free(vhost_user.vsockets[i]->path);
+ free(vhost_user.vsockets[i]);
- count = --g_vhost_server.vserver_cnt;
- g_vhost_server.server[i] = g_vhost_server.server[count];
- g_vhost_server.server[count] = NULL;
- pthread_mutex_unlock(&g_vhost_server.server_mutex);
+ count = --vhost_user.vsocket_cnt;
+ vhost_user.vsockets[i] = vhost_user.vsockets[count];
+ vhost_user.vsockets[count] = NULL;
+ pthread_mutex_unlock(&vhost_user.mutex);
return 0;
}
}
- pthread_mutex_unlock(&g_vhost_server.server_mutex);
+ pthread_mutex_unlock(&vhost_user.mutex);
return -1;
}
@@ -526,6 +678,6 @@ rte_vhost_driver_unregister(const char *path)
int
rte_vhost_driver_session_start(void)
{
- fdset_event_dispatch(&g_vhost_server.fdset);
+ fdset_event_dispatch(&vhost_user.fdset);
return 0;
}
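With the socket rework in place, an application can connect out to a vhost-user master instead of listening for one. A usage sketch, assuming the usual DPDK EAL headers; the socket path is illustrative:

uint64_t flags = RTE_VHOST_USER_CLIENT;		/* connect() rather than bind() */
/* flags |= RTE_VHOST_USER_NO_RECONNECT; */	/* opt out of the retry thread */

if (rte_vhost_driver_register("/tmp/vhost-user.sock", flags) < 0)
	rte_exit(EXIT_FAILURE, "failed to register vhost-user socket\n");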
diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.h b/lib/librte_vhost/vhost_user/vhost-net-user.h
index e3bb4138..f5332396 100644
--- a/lib/librte_vhost/vhost_user/vhost-net-user.h
+++ b/lib/librte_vhost/vhost_user/vhost-net-user.h
@@ -38,15 +38,11 @@
#include <linux/vhost.h>
#include "rte_virtio_net.h"
-#include "fd_man.h"
-
-struct vhost_server {
- char *path; /**< The path the uds is bind to. */
- int listenfd; /**< The listener sockfd. */
-};
/* refer to hw/virtio/vhost-user.c */
+#define VHOST_MEMORY_MAX_NREGIONS 8
+
typedef enum VhostUserRequest {
VHOST_USER_NONE = 0,
VHOST_USER_GET_FEATURES = 1,
diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.c b/lib/librte_vhost/vhost_user/virtio-net-user.c
index f5248bc4..e7c43479 100644
--- a/lib/librte_vhost/vhost_user/virtio-net-user.c
+++ b/lib/librte_vhost/vhost_user/virtio-net-user.c
@@ -43,7 +43,6 @@
#include <rte_common.h>
#include <rte_log.h>
-#include "virtio-net.h"
#include "virtio-net-user.h"
#include "vhost-net-user.h"
#include "vhost-net.h"
@@ -64,9 +63,10 @@ static uint64_t
get_blk_size(int fd)
{
struct stat stat;
+ int ret;
- fstat(fd, &stat);
- return (uint64_t)stat.st_blksize;
+ ret = fstat(fd, &stat);
+ return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
static void
@@ -96,10 +96,14 @@ vhost_backend_cleanup(struct virtio_net *dev)
free(dev->mem);
dev->mem = NULL;
}
+ if (dev->log_addr) {
+ munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
+ dev->log_addr = 0;
+ }
}
int
-user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
+user_set_mem_table(int vid, struct VhostUserMsg *pmsg)
{
struct VhostUserMemory memory = pmsg->payload.memory;
struct virtio_memory_regions *pregion;
@@ -110,13 +114,15 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
uint64_t alignment;
/* unmap old memory regions one by one*/
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
/* Remove from the data plane. */
- if (dev->flags & VIRTIO_DEV_RUNNING)
- notify_ops->destroy_device(dev);
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ notify_ops->destroy_device(vid);
+ }
if (dev->mem) {
free_mem_region(dev);
@@ -130,8 +136,8 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
sizeof(struct orig_region_map) * memory.nregions);
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for dev->mem\n",
- dev->device_fh);
+ "(%d) failed to allocate memory for dev->mem\n",
+ dev->vid);
return -1;
}
dev->mem->nregions = memory.nregions;
@@ -162,6 +168,11 @@ user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
* aligned.
*/
alignment = get_blk_size(pmsg->fds[idx]);
+ if (alignment == (uint64_t)-1) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "couldn't get hugepage size through fstat\n");
+ goto err_mmap;
+ }
mapped_size = RTE_ALIGN_CEIL(mapped_size, alignment);
mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
@@ -252,7 +263,7 @@ virtio_is_ready(struct virtio_net *dev)
}
void
-user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
+user_set_vring_call(int vid, struct VhostUserMsg *pmsg)
{
struct vhost_vring_file file;
@@ -263,7 +274,7 @@ user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
file.fd = pmsg->fds[0];
RTE_LOG(INFO, VHOST_CONFIG,
"vring call idx:%d file:%d\n", file.index, file.fd);
- vhost_set_vring_call(ctx, &file);
+ vhost_set_vring_call(vid, &file);
}
@@ -272,10 +283,13 @@ user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
* device is ready for packet processing.
*/
void
-user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
+user_set_vring_kick(int vid, struct VhostUserMsg *pmsg)
{
struct vhost_vring_file file;
- struct virtio_net *dev = get_device(ctx);
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return;
file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
@@ -284,30 +298,32 @@ user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
file.fd = pmsg->fds[0];
RTE_LOG(INFO, VHOST_CONFIG,
"vring kick idx:%d file:%d\n", file.index, file.fd);
- vhost_set_vring_kick(ctx, &file);
+ vhost_set_vring_kick(vid, &file);
- if (virtio_is_ready(dev) &&
- !(dev->flags & VIRTIO_DEV_RUNNING))
- notify_ops->new_device(dev);
+ if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
+ if (notify_ops->new_device(vid) == 0)
+ dev->flags |= VIRTIO_DEV_RUNNING;
+ }
}
/*
* when virtio is stopped, qemu will send us the GET_VRING_BASE message.
*/
int
-user_get_vring_base(struct vhost_device_ctx ctx,
- struct vhost_vring_state *state)
+user_get_vring_base(int vid, struct vhost_vring_state *state)
{
- struct virtio_net *dev = get_device(ctx);
+ struct virtio_net *dev = get_device(vid);
if (dev == NULL)
return -1;
/* We have to stop the queue (virtio) if it is running. */
- if (dev->flags & VIRTIO_DEV_RUNNING)
- notify_ops->destroy_device(dev);
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ notify_ops->destroy_device(vid);
+ }
/* Here we are safe to get the last used index */
- vhost_get_vring_base(ctx, state->index, state);
+ vhost_get_vring_base(vid, state->index, state);
RTE_LOG(INFO, VHOST_CONFIG,
"vring base idx:%d file:%d\n", state->index, state->num);
@@ -329,19 +345,21 @@ user_get_vring_base(struct vhost_device_ctx ctx,
* enable the virtio queue pair.
*/
int
-user_set_vring_enable(struct vhost_device_ctx ctx,
- struct vhost_vring_state *state)
+user_set_vring_enable(int vid, struct vhost_vring_state *state)
{
- struct virtio_net *dev = get_device(ctx);
+ struct virtio_net *dev;
int enable = (int)state->num;
+ dev = get_device(vid);
+ if (dev == NULL)
+ return -1;
+
RTE_LOG(INFO, VHOST_CONFIG,
"set queue enable: %d to qp idx: %d\n",
enable, state->index);
- if (notify_ops->vring_state_changed) {
- notify_ops->vring_state_changed(dev, state->index, enable);
- }
+ if (notify_ops->vring_state_changed)
+ notify_ops->vring_state_changed(vid, state->index, enable);
dev->virtqueue[state->index]->enabled = enable;
@@ -349,12 +367,11 @@ user_set_vring_enable(struct vhost_device_ctx ctx,
}
void
-user_set_protocol_features(struct vhost_device_ctx ctx,
- uint64_t protocol_features)
+user_set_protocol_features(int vid, uint64_t protocol_features)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL || protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
return;
@@ -362,15 +379,14 @@ user_set_protocol_features(struct vhost_device_ctx ctx,
}
int
-user_set_log_base(struct vhost_device_ctx ctx,
- struct VhostUserMsg *msg)
+user_set_log_base(int vid, struct VhostUserMsg *msg)
{
struct virtio_net *dev;
int fd = msg->fds[0];
uint64_t size, off;
void *addr;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (!dev)
return -1;
@@ -397,13 +413,21 @@ user_set_log_base(struct vhost_device_ctx ctx,
* fail when offset is not page size aligned.
*/
addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
if (addr == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
return -1;
}
- /* TODO: unmap on stop */
- dev->log_base = (uint64_t)(uintptr_t)addr + off;
+ /*
+ * Free any previously mapped log memory, in case
+ * VHOST_USER_SET_LOG_BASE is received more than once.
+ */
+ if (dev->log_addr) {
+ munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
+ }
+ dev->log_addr = (uint64_t)(uintptr_t)addr;
+ dev->log_base = dev->log_addr + off;
dev->log_size = size;
return 0;
@@ -418,12 +442,12 @@ user_set_log_base(struct vhost_device_ctx ctx,
* a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
*/
int
-user_send_rarp(struct vhost_device_ctx ctx, struct VhostUserMsg *msg)
+user_send_rarp(int vid, struct VhostUserMsg *msg)
{
struct virtio_net *dev;
uint8_t *mac = (uint8_t *)&msg->payload.u64;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (!dev)
return -1;
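The new_device() return check above implies the matching application-side change: notify callbacks now take a vid rather than a struct virtio_net pointer, and a non-zero return from new_device() keeps the device out of the running state. A sketch of application ops under that layout; the handler bodies are placeholders:

static int
app_new_device(int vid)
{
	/* map vid to an application queue context ... */
	return 0;	/* 0 lets the library set VIRTIO_DEV_RUNNING */
}

static void
app_destroy_device(int vid)
{
	/* quiesce the datapath for this vid ... */
}

static const struct virtio_net_device_ops app_ops = {
	.new_device	= app_new_device,
	.destroy_device	= app_destroy_device,
};

/* registered once at init: rte_vhost_driver_callback_register(&app_ops); */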
diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.h b/lib/librte_vhost/vhost_user/virtio-net-user.h
index cefec162..e1b967b8 100644
--- a/lib/librte_vhost/vhost_user/virtio-net-user.h
+++ b/lib/librte_vhost/vhost_user/virtio-net-user.h
@@ -45,20 +45,18 @@
(1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |\
(1ULL << VHOST_USER_PROTOCOL_F_RARP))
-int user_set_mem_table(struct vhost_device_ctx, struct VhostUserMsg *);
+int user_set_mem_table(int, struct VhostUserMsg *);
-void user_set_vring_call(struct vhost_device_ctx, struct VhostUserMsg *);
+void user_set_vring_call(int, struct VhostUserMsg *);
-void user_set_vring_kick(struct vhost_device_ctx, struct VhostUserMsg *);
+void user_set_vring_kick(int, struct VhostUserMsg *);
-void user_set_protocol_features(struct vhost_device_ctx ctx,
- uint64_t protocol_features);
-int user_set_log_base(struct vhost_device_ctx ctx, struct VhostUserMsg *);
-int user_send_rarp(struct vhost_device_ctx ctx, struct VhostUserMsg *);
+void user_set_protocol_features(int vid, uint64_t protocol_features);
+int user_set_log_base(int vid, struct VhostUserMsg *);
+int user_send_rarp(int vid, struct VhostUserMsg *);
-int user_get_vring_base(struct vhost_device_ctx, struct vhost_vring_state *);
+int user_get_vring_base(int, struct vhost_vring_state *);
-int user_set_vring_enable(struct vhost_device_ctx ctx,
- struct vhost_vring_state *state);
+int user_set_vring_enable(int vid, struct vhost_vring_state *state);
#endif
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index d870ad97..1785695b 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -53,7 +53,6 @@
#include <rte_virtio_net.h>
#include "vhost-net.h"
-#include "virtio-net.h"
#define MAX_VHOST_DEVICE 1024
static struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
@@ -108,15 +107,14 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
return vhost_va;
}
-
struct virtio_net *
-get_device(struct vhost_device_ctx ctx)
+get_device(int vid)
{
- struct virtio_net *dev = vhost_devices[ctx.fh];
+ struct virtio_net *dev = vhost_devices[vid];
if (unlikely(!dev)) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") device not found.\n", ctx.fh);
+ "(%d) device not found.\n", vid);
}
return dev;
@@ -233,7 +231,7 @@ alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
/*
* Reset some variables in device structure, while keeping few
- * others untouched, such as device_fh, ifname, virt_qp_nb: they
+ * others untouched, such as vid, ifname, virt_qp_nb: they
* should be same unless the device is removed.
*/
static void
@@ -255,7 +253,7 @@ reset_device(struct virtio_net *dev)
* list.
*/
int
-vhost_new_device(struct vhost_device_ctx ctx)
+vhost_new_device(void)
{
struct virtio_net *dev;
int i;
@@ -263,8 +261,7 @@ vhost_new_device(struct vhost_device_ctx ctx)
dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
if (dev == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for dev.\n",
- ctx.fh);
+ "Failed to allocate memory for new dev.\n");
return -1;
}
@@ -279,7 +276,7 @@ vhost_new_device(struct vhost_device_ctx ctx)
}
vhost_devices[i] = dev;
- dev->device_fh = i;
+ dev->vid = i;
return i;
}
@@ -289,30 +286,31 @@ vhost_new_device(struct vhost_device_ctx ctx)
* cleanup the device and remove it from device configuration linked list.
*/
void
-vhost_destroy_device(struct vhost_device_ctx ctx)
+vhost_destroy_device(int vid)
{
- struct virtio_net *dev = get_device(ctx);
+ struct virtio_net *dev = get_device(vid);
if (dev == NULL)
return;
- if (dev->flags & VIRTIO_DEV_RUNNING)
- notify_ops->destroy_device(dev);
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ notify_ops->destroy_device(vid);
+ }
cleanup_device(dev, 1);
free_device(dev);
- vhost_devices[ctx.fh] = NULL;
+ vhost_devices[vid] = NULL;
}
void
-vhost_set_ifname(struct vhost_device_ctx ctx,
- const char *if_name, unsigned int if_len)
+vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
struct virtio_net *dev;
unsigned int len;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return;
@@ -320,6 +318,7 @@ vhost_set_ifname(struct vhost_device_ctx ctx,
sizeof(dev->ifname) : if_len;
strncpy(dev->ifname, if_name, len);
+ dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}
@@ -329,11 +328,11 @@ vhost_set_ifname(struct vhost_device_ctx ctx,
* the device hasn't been initialised.
*/
int
-vhost_set_owner(struct vhost_device_ctx ctx)
+vhost_set_owner(int vid)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
@@ -344,16 +343,18 @@ vhost_set_owner(struct vhost_device_ctx ctx)
* Called from CUSE IOCTL: VHOST_RESET_OWNER
*/
int
-vhost_reset_owner(struct vhost_device_ctx ctx)
+vhost_reset_owner(int vid)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
- if (dev->flags & VIRTIO_DEV_RUNNING)
- notify_ops->destroy_device(dev);
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ notify_ops->destroy_device(vid);
+ }
cleanup_device(dev, 0);
reset_device(dev);
@@ -365,11 +366,11 @@ vhost_reset_owner(struct vhost_device_ctx ctx)
* The features that we support are requested.
*/
int
-vhost_get_features(struct vhost_device_ctx ctx, uint64_t *pu)
+vhost_get_features(int vid, uint64_t *pu)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
@@ -383,13 +384,11 @@ vhost_get_features(struct vhost_device_ctx ctx, uint64_t *pu)
* We receive the negotiated features supported by us and the virtio device.
*/
int
-vhost_set_features(struct vhost_device_ctx ctx, uint64_t *pu)
+vhost_set_features(int vid, uint64_t *pu)
{
struct virtio_net *dev;
- uint16_t vhost_hlen;
- uint16_t i;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
if (*pu & ~VHOST_FEATURES)
@@ -398,23 +397,16 @@ vhost_set_features(struct vhost_device_ctx ctx, uint64_t *pu)
dev->features = *pu;
if (dev->features &
((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
- vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
- vhost_hlen = sizeof(struct virtio_net_hdr);
+ dev->vhost_hlen = sizeof(struct virtio_net_hdr);
}
LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") Mergeable RX buffers %s, virtio 1 %s\n",
- dev->device_fh,
+ "(%d) mergeable RX buffers %s, virtio 1 %s\n",
+ dev->vid,
(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
- for (i = 0; i < dev->virt_qp_nb; i++) {
- uint16_t base_idx = i * VIRTIO_QNUM;
-
- dev->virtqueue[base_idx + VIRTIO_RXQ]->vhost_hlen = vhost_hlen;
- dev->virtqueue[base_idx + VIRTIO_TXQ]->vhost_hlen = vhost_hlen;
- }
-
return 0;
}
@@ -423,12 +415,11 @@ vhost_set_features(struct vhost_device_ctx ctx, uint64_t *pu)
* The virtio device sends us the size of the descriptor ring.
*/
int
-vhost_set_vring_num(struct vhost_device_ctx ctx,
- struct vhost_vring_state *state)
+vhost_set_vring_num(int vid, struct vhost_vring_state *state)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
@@ -509,7 +500,7 @@ numa_realloc(struct virtio_net *dev, int index)
out:
dev->virtqueue[index] = vq;
dev->virtqueue[index + 1] = vq + 1;
- vhost_devices[dev->device_fh] = dev;
+ vhost_devices[dev->vid] = dev;
return dev;
}
@@ -527,12 +518,12 @@ numa_realloc(struct virtio_net *dev, int index __rte_unused)
* This function then converts these to our address space.
*/
int
-vhost_set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
+vhost_set_vring_addr(int vid, struct vhost_vring_addr *addr)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
- dev = get_device(ctx);
+ dev = get_device(vid);
if ((dev == NULL) || (dev->mem == NULL))
return -1;
@@ -544,8 +535,8 @@ vhost_set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
addr->desc_user_addr);
if (vq->desc == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find desc ring address.\n",
- dev->device_fh);
+ "(%d) failed to find desc ring address.\n",
+ dev->vid);
return -1;
}
@@ -556,8 +547,8 @@ vhost_set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
addr->avail_user_addr);
if (vq->avail == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find avail ring address.\n",
- dev->device_fh);
+ "(%d) failed to find avail ring address.\n",
+ dev->vid);
return -1;
}
@@ -565,21 +556,29 @@ vhost_set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
addr->used_user_addr);
if (vq->used == 0) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to find used ring address.\n",
- dev->device_fh);
+ "(%d) failed to find used ring address.\n",
+ dev->vid);
return -1;
}
+ if (vq->last_used_idx != vq->used->idx) {
+ RTE_LOG(WARNING, VHOST_CONFIG,
+ "last_used_idx (%u) and vq->used->idx (%u) mismatches; "
+ "some packets maybe resent for Tx and dropped for Rx\n",
+ vq->last_used_idx, vq->used->idx);
+ vq->last_used_idx = vq->used->idx;
+ }
+
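
The resync matters after a guest reboot or back-end restart: vq->used->idx lives in guest memory and is authoritative, while last_used_idx is only a local shadow. If, say, the ring reports 42 but a stale shadow says 40, entries 40 and 41 would be treated as still in flight, re-sent on Tx and dropped on Rx, which is exactly what the warning announces before re-seeding:

    /* Both counters are free-running uint16_t values; after a
     * restart only the one in guest memory can be trusted. */
    vq->last_used_idx = vq->used->idx;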
vq->log_guest_addr = addr->log_guest_addr;
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
- dev->device_fh, vq->desc);
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
- dev->device_fh, vq->avail);
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
- dev->device_fh, vq->used);
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") log_guest_addr: %"PRIx64"\n",
- dev->device_fh, vq->log_guest_addr);
+ LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
+ dev->vid, vq->desc);
+ LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
+ dev->vid, vq->avail);
+ LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
+ dev->vid, vq->used);
+ LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
+ dev->vid, vq->log_guest_addr);
return 0;
}
@@ -589,18 +588,16 @@ vhost_set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
* The virtio device sends us the available ring last used index.
*/
int
-vhost_set_vring_base(struct vhost_device_ctx ctx,
- struct vhost_vring_state *state)
+vhost_set_vring_base(int vid, struct vhost_vring_state *state)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
/* State->index refers to the queue index. The txq is 1, rxq is 0. */
dev->virtqueue[state->index]->last_used_idx = state->num;
- dev->virtqueue[state->index]->last_used_idx_res = state->num;
return 0;
}
@@ -610,12 +607,12 @@ vhost_set_vring_base(struct vhost_device_ctx ctx,
* We send the virtio device our available ring last used index.
*/
int
-vhost_get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
+vhost_get_vring_base(int vid, uint32_t index,
struct vhost_vring_state *state)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
@@ -633,13 +630,13 @@ vhost_get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
* copied into our process space.
*/
int
-vhost_set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
+vhost_set_vring_call(int vid, struct vhost_vring_file *file)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
uint32_t cur_qp_idx = file->index / VIRTIO_QNUM;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
@@ -670,12 +667,12 @@ vhost_set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
* This fd gets copied into our process space.
*/
int
-vhost_set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
+vhost_set_vring_kick(int vid, struct vhost_vring_file *file)
{
struct virtio_net *dev;
struct vhost_virtqueue *vq;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
@@ -700,11 +697,11 @@ vhost_set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
* The device will still exist in the device configuration linked list.
*/
int
-vhost_set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
+vhost_set_backend(int vid, struct vhost_vring_file *file)
{
struct virtio_net *dev;
- dev = get_device(ctx);
+ dev = get_device(vid);
if (dev == NULL)
return -1;
@@ -716,20 +713,98 @@ vhost_set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
* we add the device.
*/
if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
- if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
- ((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED)) {
- return notify_ops->new_device(dev);
+ if (dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED &&
+ dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED) {
+ if (notify_ops->new_device(vid) < 0)
+ return -1;
+ dev->flags |= VIRTIO_DEV_RUNNING;
}
- /* Otherwise we remove it. */
- } else
- if (file->fd == VIRTIO_DEV_STOPPED)
- notify_ops->destroy_device(dev);
+ } else if (file->fd == VIRTIO_DEV_STOPPED) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ notify_ops->destroy_device(vid);
+ }
+
+ return 0;
+}
+
+int
+rte_vhost_get_numa_node(int vid)
+{
+#ifdef RTE_LIBRTE_VHOST_NUMA
+ struct virtio_net *dev = get_device(vid);
+ int numa_node;
+ int ret;
+
+ if (dev == NULL)
+ return -1;
+
+ ret = get_mempolicy(&numa_node, NULL, 0, dev,
+ MPOL_F_NODE | MPOL_F_ADDR);
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%d) failed to query numa node: %d\n", vid, ret);
+ return -1;
+ }
+
+ return numa_node;
+#else
+ RTE_SET_USED(vid);
+ return -1;
+#endif
+}
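
rte_vhost_get_numa_node() relies on a less obvious mode of get_mempolicy(2): with MPOL_F_NODE | MPOL_F_ADDR the kernel stores, through the first argument, the NUMA node backing a given address rather than a policy. A standalone sketch, with addr standing in for any heap pointer:

    #include <numaif.h>     /* get_mempolicy(), MPOL_F_*; link with -lnuma */

    int node = -1;
    if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0) {
            /* node now holds the NUMA socket whose memory backs addr */
    }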
+
+uint32_t
+rte_vhost_get_queue_num(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return 0;
+
+ return dev->virt_qp_nb;
+}
+
+int
+rte_vhost_get_ifname(int vid, char *buf, size_t len)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ len = RTE_MIN(len, sizeof(dev->ifname));
+
+ strncpy(buf, dev->ifname, len);
+ buf[len - 1] = '\0';
+
return 0;
}
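
These getters complete the move away from handing struct virtio_net to applications: callers keep only the integer vid and copy fields out on demand. A hypothetical caller, with an arbitrarily sized buffer since the API truncates and terminates safely:

    char ifname[64];
    if (rte_vhost_get_ifname(vid, ifname, sizeof(ifname)) == 0)
            printf("vhost device %d: %s\n", vid, ifname);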
-int rte_vhost_enable_guest_notification(struct virtio_net *dev,
- uint16_t queue_id, int enable)
+uint16_t
+rte_vhost_avail_entries(int vid, uint16_t queue_id)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (!dev)
+ return 0;
+
+ vq = dev->virtqueue[queue_id];
+ if (!vq->enabled)
+ return 0;
+
+ return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+}
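
rte_vhost_avail_entries() counts descriptors the guest has posted but the back end has not yet consumed. Both indices are free-running 16-bit counters, so plain unsigned subtraction stays correct across wraparound, and the volatile cast forces a fresh load of the index the guest keeps advancing:

    /* Worked wraparound case: avail->idx has wrapped to 3 while
     * last_used_idx is 65533; (uint16_t)(3 - 65533) == 6 pending. */
    uint16_t pending = (uint16_t)(avail_idx - last_used_idx);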
+
+int
+rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
if (enable) {
RTE_LOG(ERR, VHOST_CONFIG,
"guest notification isn't supported.\n");
diff --git a/mk/arch/arm/rte.vars.mk b/mk/arch/arm/rte.vars.mk
index bd85140e..2f8cf7cb 100644
--- a/mk/arch/arm/rte.vars.mk
+++ b/mk/arch/arm/rte.vars.mk
@@ -37,3 +37,8 @@ CPU_LDFLAGS ?=
CPU_ASFLAGS ?= -felf
export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
+
+RTE_OBJCOPY_TARGET = elf32-littlearm
+RTE_OBJCOPY_ARCH = arm
+
+export RTE_OBJCOPY_TARGET RTE_OBJCOPY_ARCH
diff --git a/mk/arch/arm64/rte.vars.mk b/mk/arch/arm64/rte.vars.mk
index 32e3a5fd..c168426d 100644
--- a/mk/arch/arm64/rte.vars.mk
+++ b/mk/arch/arm64/rte.vars.mk
@@ -56,3 +56,8 @@ CPU_LDFLAGS ?=
CPU_ASFLAGS ?= -felf
export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
+
+RTE_OBJCOPY_TARGET = elf64-littleaarch64
+RTE_OBJCOPY_ARCH = aarch64
+
+export RTE_OBJCOPY_TARGET RTE_OBJCOPY_ARCH
diff --git a/mk/arch/i686/rte.vars.mk b/mk/arch/i686/rte.vars.mk
index 8ba9a230..6a25312c 100644
--- a/mk/arch/i686/rte.vars.mk
+++ b/mk/arch/i686/rte.vars.mk
@@ -57,3 +57,8 @@ CPU_LDFLAGS ?= -melf_i386
CPU_ASFLAGS ?= -felf
export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
+
+RTE_OBJCOPY_TARGET = elf32-i386
+RTE_OBJCOPY_ARCH = i386
+
+export RTE_OBJCOPY_TARGET RTE_OBJCOPY_ARCH
diff --git a/mk/arch/ppc_64/rte.vars.mk b/mk/arch/ppc_64/rte.vars.mk
index 363fcd14..ef3ba1dc 100644
--- a/mk/arch/ppc_64/rte.vars.mk
+++ b/mk/arch/ppc_64/rte.vars.mk
@@ -37,3 +37,8 @@ CPU_LDFLAGS ?=
CPU_ASFLAGS ?= -felf64
export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
+
+RTE_OBJCOPY_TARGET = elf64-powerpcle
+RTE_OBJCOPY_ARCH = powerpc:common64
+
+export RTE_OBJCOPY_TARGET RTE_OBJCOPY_ARCH
diff --git a/mk/arch/x86_64/rte.vars.mk b/mk/arch/x86_64/rte.vars.mk
index b986f04b..83723c8e 100644
--- a/mk/arch/x86_64/rte.vars.mk
+++ b/mk/arch/x86_64/rte.vars.mk
@@ -57,3 +57,8 @@ CPU_LDFLAGS ?=
CPU_ASFLAGS ?= -felf64
export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
+
+RTE_OBJCOPY_TARGET = elf64-x86-64
+RTE_OBJCOPY_ARCH = i386:x86-64
+
+export RTE_OBJCOPY_TARGET RTE_OBJCOPY_ARCH
diff --git a/mk/arch/x86_x32/rte.vars.mk b/mk/arch/x86_x32/rte.vars.mk
index 3103dfc9..676f3167 100644
--- a/mk/arch/x86_x32/rte.vars.mk
+++ b/mk/arch/x86_x32/rte.vars.mk
@@ -61,3 +61,8 @@ ifneq ($(shell echo | $(CC) $(CPU_CFLAGS) -E - 2>/dev/null 1>/dev/null && echo 0
endif
export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
+
+RTE_OBJCOPY_TARGET = elf32-x86-64
+RTE_OBJCOPY_ARCH = i386:x86-64
+
+export RTE_OBJCOPY_TARGET RTE_OBJCOPY_ARCH
diff --git a/mk/exec-env/linuxapp/rte.vars.mk b/mk/exec-env/linuxapp/rte.vars.mk
index d51bd170..a8a1ee4c 100644
--- a/mk/exec-env/linuxapp/rte.vars.mk
+++ b/mk/exec-env/linuxapp/rte.vars.mk
@@ -45,9 +45,6 @@ else
EXECENV_CFLAGS = -pthread
endif
-# Workaround lack of DT_NEEDED entry
-EXECENV_LDFLAGS = --no-as-needed
-
EXECENV_LDLIBS =
EXECENV_ASFLAGS =
diff --git a/mk/machine/armv7a/rte.vars.mk b/mk/machine/armv7a/rte.vars.mk
index abdb15e5..36fa3dea 100644
--- a/mk/machine/armv7a/rte.vars.mk
+++ b/mk/machine/armv7a/rte.vars.mk
@@ -54,8 +54,6 @@
# CPU_LDFLAGS =
# CPU_ASFLAGS =
-CPU_CFLAGS += -mfloat-abi=softfp
-
MACHINE_CFLAGS += -march=armv7-a
ifdef CONFIG_RTE_ARCH_ARM_TUNE
diff --git a/mk/machine/dpaa2/rte.vars.mk b/mk/machine/dpaa2/rte.vars.mk
new file mode 100644
index 00000000..8541633c
--- /dev/null
+++ b/mk/machine/dpaa2/rte.vars.mk
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Freescale Semiconductor nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# machine:
+#
+# - can define ARCH variable (overridden by cmdline value)
+# - can define CROSS variable (overridden by cmdline value)
+# - define MACHINE_CFLAGS variable (overridden by cmdline value)
+# - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+# - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+# - can define CPU_CFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - can define CPU_LDFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - can define CPU_ASFLAGS variable (overridden by cmdline value) that
+# overrides the one defined in arch.
+# - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+MACHINE_CFLAGS += -march=armv8-a
+
+ifdef CONFIG_RTE_ARCH_ARM_TUNE
+MACHINE_CFLAGS += -mcpu=$(CONFIG_RTE_ARCH_ARM_TUNE)
+endif
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index c66e491d..ea229618 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -50,6 +50,9 @@ ifeq ($(NO_LDSCRIPT),)
LDSCRIPT = $(RTE_LDSCRIPT)
endif
+# Link only the libraries used in the application
+LDFLAGS += --as-needed
+
# default path for libs
_LDLIBS-y += -L$(RTE_SDK_BIN)/lib
@@ -57,11 +60,6 @@ _LDLIBS-y += -L$(RTE_SDK_BIN)/lib
# Order is important: from higher level to lower level
#
-_LDLIBS-y += --whole-archive
-
-_LDLIBS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += -lrte_distributor
-_LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER) += -lrte_reorder
-
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_KNI) += -lrte_kni
_LDLIBS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += -lrte_ivshmem
@@ -70,51 +68,29 @@ endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE) += -lrte_pipeline
_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += -lrte_table
_LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += -lrte_port
-_LDLIBS-$(CONFIG_RTE_LIBRTE_TIMER) += -lrte_timer
-_LDLIBS-$(CONFIG_RTE_LIBRTE_HASH) += -lrte_hash
-_LDLIBS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += -lrte_jobstats
-_LDLIBS-$(CONFIG_RTE_LIBRTE_LPM) += -lrte_lpm
-_LDLIBS-$(CONFIG_RTE_LIBRTE_POWER) += -lrte_power
-_LDLIBS-$(CONFIG_RTE_LIBRTE_ACL) += -lrte_acl
+
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PDUMP) += -lrte_pdump
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += -lrte_distributor
+_LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER) += -lrte_reorder
+_LDLIBS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += -lrte_ip_frag
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lrte_meter
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrte_sched
-_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lrte_vhost
+_LDLIBS-$(CONFIG_RTE_LIBRTE_LPM) += -lrte_lpm
+# librte_acl needs --whole-archive because of weak functions
+_LDLIBS-$(CONFIG_RTE_LIBRTE_ACL) += --whole-archive
+_LDLIBS-$(CONFIG_RTE_LIBRTE_ACL) += -lrte_acl
+_LDLIBS-$(CONFIG_RTE_LIBRTE_ACL) += --no-whole-archive
+_LDLIBS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += -lrte_jobstats
+_LDLIBS-$(CONFIG_RTE_LIBRTE_POWER) += -lrte_power
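
The librte_acl comment deserves unpacking: the library provides a generic classifier plus optimized variants, and the defaults are declared weak so an optimized object can override them. With a plain static link nothing references the optimized archive member, so the linker never pulls it in and the weak fallback wins silently; --whole-archive forces every member in. A minimal illustration with hypothetical names:

    /* default.c: weak fallback, always linked */
    int __attribute__((weak)) classify(void) { return 0; /* scalar */ }

    /* fast.c: strong override; without --whole-archive this archive
     * member may never be pulled in, since nothing references it */
    int classify(void) { return 1; /* vectorized */ }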
-# The static libraries do not know their dependencies.
-# So linking with static library requires explicit dependencies.
-ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
-_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
-_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
-ifeq ($(CONFIG_RTE_LIBRTE_VHOST_NUMA),y)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lnuma
-endif
-ifeq ($(CONFIG_RTE_LIBRTE_VHOST_USER),n)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lfuse
-endif
-_LDLIBS-$(CONFIG_RTE_PORT_PCAP) += -lpcap
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += -lpcap
-_LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lz
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -libverbs
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -libverbs
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += -lsze2
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lxenstore
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += -lgxio
-_LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += -lm
-# QAT / AESNI GCM PMDs are dependent on libcrypto (from openssl)
-# for calculating HMAC precomputes
-ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT),y)
-_LDLIBS-y += -lcrypto
-else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
-_LDLIBS-y += -lcrypto
-endif
-endif # !CONFIG_RTE_BUILD_SHARED_LIBS
+_LDLIBS-y += --whole-archive
-_LDLIBS-y += --start-group
+_LDLIBS-$(CONFIG_RTE_LIBRTE_TIMER) += -lrte_timer
+_LDLIBS-$(CONFIG_RTE_LIBRTE_HASH) += -lrte_hash
+_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lrte_vhost
_LDLIBS-$(CONFIG_RTE_LIBRTE_KVARGS) += -lrte_kvargs
_LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF) += -lrte_mbuf
-_LDLIBS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += -lrte_ip_frag
_LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER) += -lethdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += -lrte_cryptodev
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += -lrte_mempool
@@ -122,68 +98,83 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_RING) += -lrte_ring
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrte_eal
_LDLIBS-$(CONFIG_RTE_LIBRTE_CMDLINE) += -lrte_cmdline
_LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# plugins (link only if static libraries)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += -lrte_pmd_vmxnet3_uio
-_LDLIBS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += -lrte_pmd_virtio
-_LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += -lrte_pmd_af_packet
+_LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x -lz
+_LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += -lrte_pmd_bnxt
_LDLIBS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += -lrte_pmd_cxgbe
+_LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += -lrte_pmd_e1000
+_LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += -lrte_pmd_ena
_LDLIBS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += -lrte_pmd_enic
-_LDLIBS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += -lrte_pmd_i40e
_LDLIBS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += -lrte_pmd_fm10k
+_LDLIBS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += -lrte_pmd_i40e
_LDLIBS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += -lrte_pmd_ixgbe
-_LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += -lrte_pmd_e1000
-_LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += -lrte_pmd_ena
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -lrte_pmd_mlx4
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5
-_LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += -lrte_pmd_nfp
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += -lrte_pmd_szedata2
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += -lrte_pmd_mpipe
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_RING) += -lrte_pmd_ring
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += -lrte_pmd_pcap
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += -lrte_pmd_af_packet
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -lrte_pmd_mlx4 -libverbs
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -libverbs
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += -lrte_pmd_mpipe -lgxio
+_LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += -lrte_pmd_nfp -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += -lrte_pmd_null
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += -lrte_pmd_pcap -lpcap
+_LDLIBS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += -lrte_pmd_qede -lz
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_RING) += -lrte_pmd_ring
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += -lrte_pmd_szedata2 -lsze2
+_LDLIBS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += -lrte_pmd_thunderx_nicvf -lm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += -lrte_pmd_virtio
+ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += -lrte_pmd_vhost
+endif # $(CONFIG_RTE_LIBRTE_VHOST)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += -lrte_pmd_vmxnet3_uio
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += -lrte_pmd_qat
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lrte_pmd_aesni_mb
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm -lcrypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
-
-# AESNI MULTI BUFFER / GCM PMDs are dependent on the IPSec_MB library
-ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_MB),y)
-_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
-else ifeq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM),y)
-_LDLIBS-y += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
-endif
-
-# SNOW3G PMD is dependent on the LIBSSO library
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += -lrte_pmd_qat -lcrypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -lrte_pmd_snow3g
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -L$(LIBSSO_PATH)/build -lsso
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += -lrte_pmd_kasumi
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
endif # CONFIG_RTE_LIBRTE_CRYPTODEV
-ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
-
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += -lrte_pmd_vhost
+endif # !CONFIG_RTE_BUILD_SHARED_LIBS
-endif # $(CONFIG_RTE_LIBRTE_VHOST)
+_LDLIBS-y += --no-whole-archive
-endif # ! $(CONFIG_RTE_BUILD_SHARED_LIB)
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
+# The static libraries do not know their dependencies.
+# So linking with static library requires explicit dependencies.
+_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
+_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
+_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
+ifeq ($(CONFIG_RTE_LIBRTE_VHOST_NUMA),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lnuma
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_VHOST_USER),n)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lfuse
+endif
+_LDLIBS-$(CONFIG_RTE_PORT_PCAP) += -lpcap
+endif # !CONFIG_RTE_BUILD_SHARED_LIBS
_LDLIBS-y += $(EXECENV_LDLIBS)
-_LDLIBS-y += --end-group
-_LDLIBS-y += --no-whole-archive
LDLIBS += $(_LDLIBS-y) $(CPU_LDLIBS) $(EXTRA_LDLIBS)
# Eliminate duplicates without sorting
LDLIBS := $(shell echo $(LDLIBS) | \
- awk '{for (i = 1; i <= NF; i++) { if (!seen[$$i]++) print $$i }}')
+ awk '{for (i = 1; i <= NF; i++) { \
+ if ($$i !~ /^-l.*/ || !seen[$$i]++) print $$i }}')
+
+ifeq ($(RTE_DEVEL_BUILD)$(CONFIG_RTE_BUILD_SHARED_LIB),yy)
+LDFLAGS += -rpath=$(RTE_SDK_BIN)/lib
+endif
.PHONY: all
all: install
diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk
index 8f7e021c..0187ae85 100644
--- a/mk/rte.lib.mk
+++ b/mk/rte.lib.mk
@@ -77,6 +77,13 @@ else
_CPU_LDFLAGS := $(CPU_LDFLAGS)
endif
+# Translate DEPDIRS-y into LDLIBS
+# Ignore (sub)directory dependencies which do not provide an actual library
+_IGNORE_DIRS = lib/librte_eal/% lib/librte_net lib/librte_compat
+_DEPDIRS = $(filter-out $(_IGNORE_DIRS),$(DEPDIRS-y))
+_LDDIRS = $(subst librte_ether,libethdev,$(_DEPDIRS))
+LDLIBS += $(subst lib/lib,-l,$(_LDDIRS))
+
O_TO_A = $(AR) crDs $(LIB) $(OBJS-y)
O_TO_A_STR = $(subst ','\'',$(O_TO_A)) #'# fix syntax highlight
O_TO_A_DISP = $(if $(V),"$(O_TO_A_STR)"," AR $(@)")
@@ -86,8 +93,8 @@ O_TO_A_DO = @set -e; \
$(O_TO_A) && \
echo $(O_TO_A_CMD) > $(call exe2cmd,$(@))
-O_TO_S = $(LD) $(_CPU_LDFLAGS) $(EXTRA_LDFLAGS) -shared $(OBJS-y) $(LDLIBS) \
- -Wl,-soname,$(LIB) -o $(LIB)
+O_TO_S = $(LD) -L$(RTE_SDK_BIN)/lib $(_CPU_LDFLAGS) $(EXTRA_LDFLAGS) \
+ -shared $(OBJS-y) -z defs $(LDLIBS) -Wl,-soname,$(LIB) -o $(LIB)
O_TO_S_STR = $(subst ','\'',$(O_TO_S)) #'# fix syntax highlight
O_TO_S_DISP = $(if $(V),"$(O_TO_S_STR)"," LD $(@)")
O_TO_S_DO = @set -e; \
@@ -95,24 +102,6 @@ O_TO_S_DO = @set -e; \
$(O_TO_S) && \
echo $(O_TO_S_CMD) > $(call exe2cmd,$(@))
-ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
-O_TO_C = $(AR) crus $(LIB_ONE) $(OBJS-y)
-O_TO_C_STR = $(subst ','\'',$(O_TO_C)) #'# fix syntax highlight
-O_TO_C_DISP = $(if $(V),"$(O_TO_C_STR)"," AR_C $(@)")
-O_TO_C_DO = @set -e; \
- $(lib_dir) \
- $(copy_obj)
-else
-O_TO_C = $(LD) -shared $(OBJS-y) -o $(LIB_ONE)
-O_TO_C_STR = $(subst ','\'',$(O_TO_C)) #'# fix syntax highlight
-O_TO_C_DISP = $(if $(V),"$(O_TO_C_STR)"," LD_C $(@)")
-O_TO_C_DO = @set -e; \
- $(lib_dir) \
- $(copy_obj)
-endif
-
-copy_obj = cp -f $(OBJS-y) $(RTE_OUTPUT)/build/lib;
-lib_dir = [ -d $(RTE_OUTPUT)/lib ] || mkdir -p $(RTE_OUTPUT)/lib;
-include .$(LIB).cmd
#
diff --git a/mk/rte.sdkinstall.mk b/mk/rte.sdkinstall.mk
index 68e56b68..abdab0f0 100644
--- a/mk/rte.sdkinstall.mk
+++ b/mk/rte.sdkinstall.mk
@@ -116,9 +116,9 @@ install-runtime:
$(Q)$(call rte_mkdir, $(DESTDIR)$(libdir))
$(Q)cp -a $O/lib/* $(DESTDIR)$(libdir)
$(Q)$(call rte_mkdir, $(DESTDIR)$(bindir))
- $(Q)tar -cf - -C $O app --exclude 'app/*.map' \
+ $(Q)tar -cf - -C $O --exclude 'app/*.map' \
--exclude 'app/cmdline*' --exclude app/test \
- --exclude app/testacl --exclude app/testpipeline | \
+ --exclude app/testacl --exclude app/testpipeline app | \
tar -xf - -C $(DESTDIR)$(bindir) --strip-components=1 \
--keep-newer-files --warning=no-ignore-newer
$(Q)$(call rte_mkdir, $(DESTDIR)$(datadir))
diff --git a/mk/rte.sdktest.mk b/mk/rte.sdktest.mk
index ee25f280..ff571819 100644
--- a/mk/rte.sdktest.mk
+++ b/mk/rte.sdktest.mk
@@ -46,14 +46,14 @@ DIR := $(shell basename $(RTE_OUTPUT))
#
# test: launch auto-tests, very simple for now.
#
-PHONY: test fast_test
+.PHONY: test fast_test perf_test coverage
-coverage: BLACKLIST=-Mempool_perf,Memcpy_perf,Hash_perf
-fast_test: BLACKLIST=-Ring_perf,Mempool_perf,Memcpy_perf,Hash_perf,Lpm6
-ring_test: WHITELIST=Ring,Ring_perf
-mempool_test: WHITELIST=Mempool,Mempool_perf
-perf_test:WHITELIST=Mempool_perf,Memcpy_perf,Hash_perf,Ring_perf
-test fast_test ring_test mempool_test perf_test:
+PERFLIST=ring_perf,mempool_perf,memcpy_perf,hash_perf,timer_perf
+coverage: BLACKLIST=-$(PERFLIST)
+fast_test: BLACKLIST=-$(PERFLIST)
+perf_test: WHITELIST=$(PERFLIST)
+
+test fast_test perf_test:
@mkdir -p $(AUTOTEST_DIR) ; \
cd $(AUTOTEST_DIR) ; \
if [ -f $(RTE_OUTPUT)/app/test ]; then \
diff --git a/scripts/check-git-log.sh b/scripts/check-git-log.sh
index ce6c15ea..7d2c7ee8 100755
--- a/scripts/check-git-log.sh
+++ b/scripts/check-git-log.sh
@@ -55,7 +55,7 @@ tags=$(git log --format='%b' $range | grep -i -e 'by *:' -e 'fix.*:')
fixes=$(git log --format='%h %s' $range | grep -i ': *fix' | cut -d' ' -f1)
# check headline format (spacing, no punctuation, no code)
-bad=$(echo "$headlines" | grep \
+bad=$(echo "$headlines" | grep --color=always \
-e ' ' \
-e '^ ' \
-e ' $' \
@@ -69,7 +69,7 @@ bad=$(echo "$headlines" | grep \
[ -z "$bad" ] || printf "Wrong headline format:\n$bad\n"
# check headline label for common typos
-bad=$(echo "$headlines" | grep \
+bad=$(echo "$headlines" | grep --color=always \
-e '^example[:/]' \
-e '^apps/' \
-e '^testpmd' \
@@ -79,15 +79,15 @@ bad=$(echo "$headlines" | grep \
[ -z "$bad" ] || printf "Wrong headline label:\n$bad\n"
# check headline lowercase for first words
-bad=$(echo "$headlines" | grep \
+bad=$(echo "$headlines" | grep --color=always \
-e '^.*[A-Z].*:' \
-e ': *[A-Z]' \
| sed 's,^,\t,')
[ -z "$bad" ] || printf "Wrong headline uppercase:\n$bad\n"
# check headline uppercase (Rx/Tx, VF, L2, MAC, Linux, ARM...)
-bad=$(echo "$headlines" | grep \
- -e 'rx\|tx\|RX\|TX' \
+bad=$(echo "$headlines" | grep -E --color=always \
+ -e '\<(rx|tx|RX|TX)\>' \
-e '\<[pv]f\>' \
-e '\<l[234]\>' \
-e ':.*\<dma\>' \
@@ -111,9 +111,15 @@ bad=$(echo "$headlines" | awk 'length>60 {print}' | sed 's,^,\t,')
[ -z "$bad" ] || printf "Headline too long:\n$bad\n"
# check body lines length (75 max)
-bad=$(echo "$bodylines" | awk 'length>75 {print}' | sed 's,^,\t,')
+bad=$(echo "$bodylines" | grep -v '^Fixes:' | awk 'length>75 {print}' | sed 's,^,\t,')
[ -z "$bad" ] || printf "Line too long:\n$bad\n"
+# check for commit messages beginning with "It"
+bad=$(echo "$bodylines" | head -n1 | grep -E --color=always \
+ -ie '^It ' \
+ | sed 's,^,\t,')
+[ -z "$bad" ] || printf "Wrong beginning of commit message:\n$bad\n"
+
# check tags spelling
bad=$(echo "$tags" |
grep -v '^\(Reported\|Suggested\|Signed-off\|Acked\|Reviewed\|Tested\)-by: [^,]* <.*@.*>$' |
@@ -134,7 +140,11 @@ IFS='
fixtags=$(echo "$tags" | grep '^Fixes: ')
bad=$(for fixtag in $fixtags ; do
hash=$(echo "$fixtag" | sed 's,^Fixes: \([0-9a-f]*\).*,\1,')
- good="Fixes: $hash "$(git log --format='("%s")' -1 $hash 2>&-)
+ if git branch --contains $hash | grep -q '^\*' ; then
+ good="Fixes: $hash "$(git log --format='("%s")' -1 $hash 2>&-)
+ else
+ good="reference not in current branch"
+ fi
printf "$fixtag" | grep -v "^$good$"
done | sed 's,^,\t,')
[ -z "$bad" ] || printf "Wrong 'Fixes' reference:\n$bad\n"
diff --git a/scripts/checkpatches.sh b/scripts/checkpatches.sh
index 5c58a202..7111558f 100755
--- a/scripts/checkpatches.sh
+++ b/scripts/checkpatches.sh
@@ -42,23 +42,25 @@ options="--no-tree"
options="$options --max-line-length=$length"
options="$options --show-types"
options="$options --ignore=LINUX_VERSION_CODE,FILE_PATH_CHANGES,\
-VOLATILE,PREFER_PACKED,PREFER_ALIGNED,PREFER_PRINTF,PREFER_KERNEL_TYPES,\
+VOLATILE,PREFER_PACKED,PREFER_ALIGNED,PREFER_PRINTF,PREFER_KERNEL_TYPES,BIT_MACRO,\
SPLIT_STRING,LINE_SPACING,PARENTHESIS_ALIGNMENT,NETWORKING_BLOCK_COMMENT_STYLE,\
NEW_TYPEDEFS,COMPARISON_TO_NULL"
print_usage () {
cat <<- END_OF_HELP
- usage: $(basename $0) [-q] [-v] [patch1 [patch2] ...]]
+ usage: $(basename $0) [-q] [-v] [-nX|patch1 [patch2] ...]
Run Linux kernel checkpatch.pl with DPDK options.
The environment variable DPDK_CHECKPATCH_PATH must be set.
END_OF_HELP
}
+number=0
quiet=false
verbose=false
-while getopts hqv ARG ; do
+while getopts hn:qv ARG ; do
case $ARG in
+ n ) number=$OPTARG ;;
q ) quiet=true && options="$options --no-summary" ;;
v ) verbose=true ;;
h ) print_usage ; exit 0 ;;
@@ -74,17 +76,42 @@ if [ ! -x "$DPDK_CHECKPATCH_PATH" ] ; then
exit 1
fi
+total=0
status=0
-for p in "$@" ; do
- ! $verbose || printf '\n### %s\n\n' "$p"
- report=$($DPDK_CHECKPATCH_PATH $options "$p" 2>/dev/null)
+
+check () { # <patch> <commit> <title>
+ total=$(($total + 1))
+ ! $verbose || printf '\n### %s\n\n' "$3"
+ if [ -n "$1" ] ; then
+ report=$($DPDK_CHECKPATCH_PATH $options "$1" 2>/dev/null)
+ elif [ -n "$2" ] ; then
+ report=$(git format-patch --no-stat --stdout -1 $commit |
+ $DPDK_CHECKPATCH_PATH $options - 2>/dev/null)
+ fi
[ $? -ne 0 ] || continue
- $verbose || printf '\n### %s\n\n' "$p"
+ $verbose || printf '\n### %s\n\n' "$3"
printf '%s\n' "$report" | head -n -6
status=$(($status + 1))
-done
-pass=$(($# - $status))
-$quiet || printf '%d/%d valid patch' $pass $#
+}
+
+if [ -z "$1" ] ; then
+ if [ $number -eq 0 ] ; then
+ commits=$(git rev-list origin/master..)
+ else
+ commits=$(git rev-list --max-count=$number HEAD)
+ fi
+ for commit in $commits ; do
+ subject=$(git log --format='%s' -1 $commit)
+ check '' $commit "$subject"
+ done
+else
+ for patch in "$@" ; do
+ subject=$(sed -n 's,^Subject: ,,p' "$patch")
+ check "$patch" '' "$subject"
+ done
+fi
+pass=$(($total - $status))
+$quiet || printf '%d/%d valid patch' $pass $total
$quiet || [ $pass -le 1 ] || printf 'es'
$quiet || printf '\n'
exit $status
diff --git a/scripts/merge-maps.sh b/scripts/merge-maps.sh
deleted file mode 100755
index edc88dea..00000000
--- a/scripts/merge-maps.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-
-FILES=$(find "$RTE_SDK"/lib "$RTE_SDK"/drivers -name "*_version.map")
-SYMBOLS=$(grep -h "{" $FILES | sort -u | sed 's/{//')
-
-first=0
-prev_sym="none"
-
-for s in $SYMBOLS; do
- echo "$s {"
- echo " global:"
- echo ""
- for f in $FILES; do
- sed -n "/$s {/,/}/p" "$f" | sed '/^$/d' | grep -v global | grep -v local | sed -e '1d' -e '$d'
- done | sort -u
- echo ""
- if [ $first -eq 0 ]; then
- first=1;
- echo " local: *;";
- fi
- if [ "$prev_sym" = "none" ]; then
- echo "};";
- prev_sym=$s;
- else
- echo "} $prev_sym;";
- prev_sym=$s;
- fi
- echo ""
-done
diff --git a/scripts/test-build.sh b/scripts/test-build.sh
index f7ba1fbb..5bcecfc3 100755
--- a/scripts/test-build.sh
+++ b/scripts/test-build.sh
@@ -35,6 +35,7 @@ default_path=$PATH
# Load config options:
# - AESNI_MULTI_BUFFER_LIB_PATH
# - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2)
+# - DPDK_DEP_ARCHIVE
# - DPDK_DEP_CFLAGS
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MOFED (y/[n])
@@ -44,7 +45,8 @@ default_path=$PATH
# - DPDK_DEP_ZLIB (y/[n])
# - DPDK_MAKE_JOBS (int)
# - DPDK_NOTIFY (notify-send)
-# - LIBSSO_PATH
+# - LIBSSO_SNOW3G_PATH
+# - LIBSSO_KASUMI_PATH
. $(dirname $(readlink -e $0))/load-devel-config.sh
print_usage () {
@@ -61,6 +63,7 @@ print_help () {
-h this help
-jX use X parallel jobs in "make"
-s short test with only first config without examples/doc
+ -v verbose build
config: defconfig[[~][+]option1[[~][+]option2...]]
Example: x86_64-native-linuxapp-gcc+debug~RXTX_CALLBACKS
@@ -111,6 +114,7 @@ reset_env ()
{
export PATH=$default_path
unset CROSS
+ unset DPDK_DEP_ARCHIVE
unset DPDK_DEP_CFLAGS
unset DPDK_DEP_LDFLAGS
unset DPDK_DEP_MOFED
@@ -119,13 +123,19 @@ reset_env ()
unset DPDK_DEP_SZE
unset DPDK_DEP_ZLIB
unset AESNI_MULTI_BUFFER_LIB_PATH
- unset LIBSSO_PATH
+ unset LIBSSO_SNOW3G_PATH
+ unset LIBSSO_KASUMI_PATH
unset PQOS_INSTALL_PATH
}
config () # <directory> <target> <options>
{
- if [ ! -e $1/.config ] ; then
+ reconfig=false
+ if git rev-parse 2>&- && [ -n "$(git diff HEAD~ -- config)" ] ; then
+ echo 'Default config may have changed'
+ reconfig=true
+ fi
+ if [ ! -e $1/.config ] || $reconfig ; then
echo "================== Configure $1"
make T=$2 O=$1 config
@@ -138,6 +148,7 @@ config () # <directory> <target> <options>
! echo $3 | grep -q '+shared' || \
sed -ri 's,(SHARED_LIB=)n,\1y,' $1/.config
! echo $3 | grep -q '+debug' || ( \
+ sed -ri 's,(RTE_LOG_LEVEL=).*,\1RTE_LOG_DEBUG,' $1/.config
sed -ri 's,(_DEBUG.*=)n,\1y,' $1/.config
sed -ri 's,(_STAT.*=)n,\1y,' $1/.config
sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )
@@ -148,12 +159,16 @@ config () # <directory> <target> <options>
sed -ri 's,(PCI_CONFIG=)n,\1y,' $1/.config
sed -ri 's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
sed -ri 's,(BYPASS=)n,\1y,' $1/.config
+ test "$DPDK_DEP_ARCHIVE" != y || \
+ sed -ri 's,(RESOURCE_TAR=)n,\1y,' $1/.config
test "$DPDK_DEP_MOFED" != y || \
sed -ri 's,(MLX._PMD=)n,\1y,' $1/.config
test "$DPDK_DEP_SZE" != y || \
sed -ri 's,(PMD_SZEDATA2=)n,\1y,' $1/.config
test "$DPDK_DEP_ZLIB" != y || \
sed -ri 's,(BNX2X_PMD=)n,\1y,' $1/.config
+ test "$DPDK_DEP_ZLIB" != y || \
+ sed -ri 's,(QEDE_PMD=)n,\1y,' $1/.config
sed -ri 's,(NFP_PMD=)n,\1y,' $1/.config
test "$DPDK_DEP_PCAP" != y || \
sed -ri 's,(PCAP=)n,\1y,' $1/.config
@@ -161,8 +176,10 @@ config () # <directory> <target> <options>
sed -ri 's,(PMD_AESNI_MB=)n,\1y,' $1/.config
test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
sed -ri 's,(PMD_AESNI_GCM=)n,\1y,' $1/.config
- test -z "$LIBSSO_PATH" || \
+ test -z "$LIBSSO_SNOW3G_PATH" || \
sed -ri 's,(PMD_SNOW3G=)n,\1y,' $1/.config
+ test -z "$LIBSSO_KASUMI_PATH" || \
+ sed -ri 's,(PMD_KASUMI=)n,\1y,' $1/.config
test "$DPDK_DEP_SSL" != y || \
sed -ri 's,(PMD_QAT=)n,\1y,' $1/.config
sed -ri 's,(KNI_VHOST.*=)n,\1y,' $1/.config
diff --git a/scripts/test-null.sh b/scripts/test-null.sh
index ef6d800d..32a47b17 100755
--- a/scripts/test-null.sh
+++ b/scripts/test-null.sh
@@ -36,7 +36,6 @@ build=${1:-build}
coremask=${2:-3} # default using cores 0 and 1
if grep -q SHARED_LIB=y $build/.config; then
- export LD_LIBRARY_PATH=$build/lib:$LD_LIBRARY_PATH
pmd='-d librte_pmd_null.so'
fi
diff --git a/tools/dpdk_nic_bind.py b/tools/dpdk_nic_bind.py
index 28eace33..b69ca2a0 100755
--- a/tools/dpdk_nic_bind.py
+++ b/tools/dpdk_nic_bind.py
@@ -38,8 +38,8 @@ import getopt
import subprocess
from os.path import exists, abspath, dirname, basename
-# The PCI device class for ETHERNET devices
-ETHERNET_CLASS = "0200"
+# The PCI base class for NETWORK devices
+NETWORK_BASE_CLASS = "02"
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
@@ -248,7 +248,7 @@ def get_nic_details():
dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
for dev_line in dev_lines:
if (len(dev_line) == 0):
- if dev["Class"] == ETHERNET_CLASS:
+ if dev["Class"][0:2] == NETWORK_BASE_CLASS:
# convert device and vendor ids to numbers, then add to global
dev["Vendor"] = int(dev["Vendor"], 16)
dev["Device"] = int(dev["Device"], 16)